/*
 * Copyright (C) 2012 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "linker_phdr.h"

#include <stdlib.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <unistd.h>

#include "linker_debug.h"
#include "linker_dlwarning.h"
#include "linker_globals.h"

#include "platform/bionic/macros.h"
#include "platform/bionic/page.h"

#include <android-base/stringprintf.h>

#include <algorithm>
#include <iterator>
#include <numeric>
#include <string>
#include <vector>

static bool g_enable_16kb_app_compat;

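// Returns true if @prefix (here, a PT_GNU_RELRO phdr) begins at the same virtual
// address as @segment, i.e. it describes a leading prefix of that LOAD segment.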
static inline bool segment_contains_prefix(const ElfW(Phdr)* segment, const ElfW(Phdr)* prefix) {
  return segment && prefix && segment->p_vaddr == prefix->p_vaddr;
}

void set_16kb_appcompat_mode(bool enable_app_compat) {
  g_enable_16kb_app_compat = enable_app_compat;
}

bool get_16kb_appcompat_mode() {
  return g_enable_16kb_app_compat;
}

/*
 * Returns true if the ELF contains at most one RELRO segment, and populates
 * @relro_phdr with the RELRO phdr, or with nullptr if there is none.
 *
 * Returns false if more than one RELRO segment is found.
 */
bool ElfReader::HasAtMostOneRelroSegment(const ElfW(Phdr)** relro_phdr) {
  const ElfW(Phdr)* relro = nullptr;
  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table_[i];

    if (phdr->p_type != PT_GNU_RELRO) {
      continue;
    }

    if (relro == nullptr) {
      relro = phdr;
    } else {
      return false;
    }
  }

  *relro_phdr = relro;

  return true;
}

/*
 * In 16KiB compatibility mode, ELFs with the following segment layout
 * can be loaded successfully:
 *
 *         ┌────────────┬─────────────────────────┬────────────┐
 *         │            │                         │            │
 *         │  (RO|RX)*  │   (RW - RELRO prefix)?  │    (RW)*   │
 *         │            │                         │            │
 *         └────────────┴─────────────────────────┴────────────┘
 *
 * In other words, compatible layouts have:
 *     - zero or more RO or RX segments;
 *     - followed by zero or one RELRO prefix;
 *     - followed by zero or more RW segments (this can include the RW
 *       suffix from the segment containing the RELRO prefix, if any).
 *
 * In 16KiB compat mode, after relocation, the ELF's layout in virtual
 * memory is as shown below:
 *         ┌──────────────────────────────────────┬────────────┐
 *         │                                      │            │
 *         │                 (RX)?                │    (RW)?   │
 *         │                                      │            │
 *         └──────────────────────────────────────┴────────────┘
 *
 * In compat mode:
 *     - the RO and RX segments, along with the RELRO prefix, are
 *       protected as RX;
 *     - the RW segments, along with the RW suffix of the RELRO segment
 *       (if any), are protected as RW.
 *
 * This allows the single RX|RW permission boundary to be aligned with
 * a 16KiB page boundary, since a single page cannot have multiple
 * permissions.
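 *
 * For example, a compatible layout (hypothetical, illustrative addresses):
 *     LOAD [RX] 0x0000..0x4000
 *     LOAD [RW] 0x4000..0x8000   (RELRO prefix 0x4000..0x6000)
 *     LOAD [RW] 0x8000..0xa000
 * has its RX|RW boundary at align_up(0x6000, kCompatPageSize) == 0x6000;
 * the load bias is then shifted so this boundary lands on a page boundary.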
 *
 * IsEligibleForRXRWAppCompat() identifies compatible ELFs and populates @vaddr
 * with the boundary between the RX|RW portions.
 *
 * Returns true if the ELF can be loaded in compat mode, else false.
 */
bool ElfReader::IsEligibleForRXRWAppCompat(ElfW(Addr)* vaddr) {
  const ElfW(Phdr)* relro_phdr = nullptr;
  if (!HasAtMostOneRelroSegment(&relro_phdr)) {
    DL_WARN("\"%s\": RX|RW compat loading failed: Multiple RELRO segments found", name_.c_str());
    return false;
  }

  const ElfW(Phdr)* last_rx = nullptr;
  const ElfW(Phdr)* last_rw = nullptr;
  const ElfW(Phdr)* first_rw = nullptr;

  for (size_t i = 0; i < phdr_num_; ++i) {
    const ElfW(Phdr)* curr = &phdr_table_[i];
    const ElfW(Phdr)* prev = (i > 0) ? &phdr_table_[i - 1] : nullptr;

    if (curr->p_type != PT_LOAD) {
      continue;
    }

    int prot = PFLAGS_TO_PROT(curr->p_flags);

    if ((prot & PROT_WRITE) && (prot & PROT_READ)) {
      if (!first_rw) {
        first_rw = curr;
      }

      if (last_rw && last_rw != prev) {
        DL_WARN("\"%s\": RX|RW compat loading failed: ELF contains non-adjacent RW segments",
                name_.c_str());
        return false;
      }

      last_rw = curr;
    } else if ((prot & PROT_EXEC) && (prot & PROT_READ)) {
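      // Note: these raw pointer comparisons encode program header table order,
      // since last_rx and last_rw both point into phdr_table_.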
      if (!last_rx || last_rx > last_rw) {
        last_rx = curr;
      } else {
        DL_WARN(
            "\"%s\": RX|RW compat loading failed: ELF contains RX segments "
            "separated by RW segments",
            name_.c_str());
        return false;
      }
    }
  }

  if (!relro_phdr) {
    *vaddr = __builtin_align_down(first_rw->p_vaddr, kCompatPageSize);
    return true;
  }

  // The RELRO segment is present; it must be a prefix of the first RW segment.
  if (!segment_contains_prefix(first_rw, relro_phdr)) {
    DL_WARN("\"%s\": RX|RW compat loading failed: RELRO is not in the first RW segment",
            name_.c_str());
    return false;
  }

  uint64_t end;
  if (__builtin_add_overflow(relro_phdr->p_vaddr, relro_phdr->p_memsz, &end)) {
    DL_WARN("\"%s\": RX|RW compat loading failed: relro vaddr + memsz overflowed", name_.c_str());
    return false;
  }

  *vaddr = __builtin_align_up(end, kCompatPageSize);
  return true;
}

/*
 * Returns the offset/shift needed to align @addr to the next page boundary
 * for RX|RW compat loading.
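 *
 * e.g. with 16KiB pages, an @addr that is 0x1000 past a page boundary needs
 * a shift of 0x3000 (page_size() - 0x1000) to reach the next page boundary.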
 */
static inline ElfW(Addr) perm_boundary_offset(const ElfW(Addr) addr) {
  ElfW(Addr) offset = page_offset(addr);

  return offset ? page_size() - offset : 0;
}

enum relro_pos_t {
  NONE,    // No RELRO in the LOAD segment
  PREFIX,  // RELRO is a prefix of the LOAD segment
  MIDDLE,  // RELRO is contained in the middle of the LOAD segment
  SUFFIX,  // RELRO is a suffix of the LOAD segment
  ENTIRE,  // RELRO is the entire LOAD segment
  ERROR,   // The RELRO size is invalid (spans multiple segments?)
};

struct segment {
  const ElfW(Phdr)* phdr;
  relro_pos_t relro_pos;
};

static inline relro_pos_t relro_pos(const ElfW(Phdr)* phdr, const ElfW(Phdr)* relro) {
  // For checking the RELRO boundaries, use the LOAD segment's p_align
  // instead of the system or compat page size.
  uint64_t align = phdr->p_align;
  uint64_t seg_start = __builtin_align_down(phdr->p_vaddr, align);
  uint64_t seg_end = __builtin_align_up(phdr->p_vaddr + phdr->p_memsz, align);
  uint64_t relro_start = __builtin_align_down(relro->p_vaddr, align);
  uint64_t relro_end = __builtin_align_up(relro->p_vaddr + relro->p_memsz, align);

  if (relro_end <= seg_start || relro_start >= seg_end) return NONE;

  // Spans multiple LOAD segments?
  if (relro_start < seg_start || relro_end > seg_end) return ERROR;

  // Prefix or entire?
  if (relro_start == seg_start) return (relro_end < seg_end) ? PREFIX : ENTIRE;

  // Must be suffix or middle.
  return (relro_end == seg_end) ? SUFFIX : MIDDLE;
}

static std::vector<struct segment> elf_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count) {
  std::vector<struct segment> segments;

  for (size_t index = 0; index < phdr_count; ++index) {
    const ElfW(Phdr)* phdr = &phdr_table[index];

    if (phdr->p_type != PT_LOAD) continue;

    struct segment segment = {
        .phdr = phdr,
        .relro_pos = NONE,
    };

    segments.emplace_back(segment);
  }

  for (size_t index = 0; index < phdr_count; ++index) {
    const ElfW(Phdr)* relro = &phdr_table[index];

    if (relro->p_type != PT_GNU_RELRO) continue;

    for (struct segment& segment : segments) {
      if (segment.relro_pos != NONE) continue;

      segment.relro_pos = relro_pos(segment.phdr, relro);
    }
  }

  // Sort by vaddr.
  std::sort(segments.begin(), segments.end(), [](const struct segment& a, const struct segment& b) {
    return a.phdr->p_vaddr < b.phdr->p_vaddr;
  });

  return segments;
}

static inline std::string prot_str(const struct segment& segment) {
  int prot = PFLAGS_TO_PROT(segment.phdr->p_flags);
  std::string str;

  if (prot & PROT_READ) str += "R";
  if (prot & PROT_WRITE) str += "W";
  if (prot & PROT_EXEC) str += "X";

  return str;
}

static inline std::string relro_pos_str(const struct segment& segment) {
  relro_pos_t relro_pos = segment.relro_pos;

  switch (relro_pos) {
    case NONE:
      return "";
    case PREFIX:
      return "(PREFIX)";
    case MIDDLE:
      return "(MIDDLE)";
    case SUFFIX:
      return "(SUFFIX)";
    case ENTIRE:
      return "(ENTIRE)";
    case ERROR:
      return "(ERROR)";
  }

  // Unreachable.
  abort();
}

static inline std::string segment_format(const struct segment& segment) {
  uint64_t align_kbytes = segment.phdr->p_align / 1024;
  std::string format = prot_str(segment);

  if (segment.relro_pos != NONE) format += " " + relro_pos_str(segment);

  return format + " " + std::to_string(align_kbytes) + "K";
}

/*
 * Returns a string representing the ELF's load segment layout.
 *
 * Each segment has the format: <permissions> [(<relro position>)] <p_align>
 *
 * e.g. "RX 4K,RW (ENTIRE) 4K,RW 4K,RW 16K,RX 16K,R 16K,RW 16K"
 */
static inline std::string elf_layout(const ElfW(Phdr)* phdr_table, size_t phdr_count) {
  std::vector<struct segment> segments = elf_segments(phdr_table, phdr_count);
  std::vector<std::string> layout;

  for (struct segment& segment : segments) {
    layout.emplace_back(segment_format(segment));
  }

  if (layout.empty()) return "";

  return std::accumulate(std::next(layout.begin()), layout.end(), layout[0],
                         [](std::string a, std::string b) { return std::move(a) + "," + b; });
}

void ElfReader::LabelCompatVma() {
  // Label the ELF VMA, since compat mode uses anonymous mappings, and some applications may rely
  // on them having their name set to the ELF's path.
  // Since kernel 5.10 it is safe to use non-global storage for the VMA name because it will be
  // copied into the kernel. 16KiB pages require a minimum kernel version of 6.1, so we can safely
  // use a stack-allocated buffer here.
  char vma_name_buffer[kVmaNameLimit] = {};
  format_left_truncated_vma_anon_name(vma_name_buffer, sizeof(vma_name_buffer),
                                      "16k:", name_.c_str(), "");
  if (prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, load_start_, load_size_, vma_name_buffer) != 0) {
    DL_WARN("\"%s\": Failed to rename 16KiB compat segment: %m", name_.c_str());
  }
}

void ElfReader::SetupRXRWAppCompat(ElfW(Addr) rx_rw_boundary) {
  // Adjust the load_bias_ to position the RX|RW boundary on a page boundary.
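  // e.g. if rx_rw_boundary is 0x1000 past a page boundary, perm_boundary_offset()
  // returns page_size() - 0x1000, shifting the whole load so that
  // load_bias_ + rx_rw_boundary is page aligned.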
  load_bias_ += perm_boundary_offset(rx_rw_boundary);

  // RW region (.data, .bss, ...)
  ElfW(Addr) rw_start = load_bias_ + rx_rw_boundary;
  CHECK(rw_start % page_size() == 0);

  // Compat code and RELRO (RX) region (.text, .data.relro, ...)
  compat_code_start_ = load_start();
  compat_code_size_ = rw_start - load_start();
}

void ElfReader::SetupRWXAppCompat() {
  // Warn and fall back to an RWX mapping.
  const std::string layout = elf_layout(phdr_table_, phdr_num_);
  DL_WARN("\"%s\": RX|RW compat loading failed, falling back to RWX compat: load segments [%s]",
          name_.c_str(), layout.c_str());
  compat_code_start_ = load_start();
  compat_code_size_ = load_size();
}

bool ElfReader::Setup16KiBAppCompat() {
  if (!should_use_16kib_app_compat_) {
    return true;
  }

  ElfW(Addr) rx_rw_boundary;  // Permission boundary for RX|RW compat mode
  if (IsEligibleForRXRWAppCompat(&rx_rw_boundary)) {
    SetupRXRWAppCompat(rx_rw_boundary);
  } else {
    should_16kib_app_compat_use_rwx_ = true;
    SetupRWXAppCompat();
  }

  LabelCompatVma();
  return true;
}

bool ElfReader::CompatMapSegment(size_t seg_idx, size_t len) {
  const ElfW(Phdr)* phdr = &phdr_table_[seg_idx];

  // NOTE: The compat (legacy) page size (4096) must be used when aligning
  // the 4KiB segments for loading (reading). The larger 16KiB page size
  // would lead to overwriting adjacent segments, since the ELF's segments
  // are not 16KiB aligned.
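  // e.g. a segment at p_vaddr 0x3000 rounds down to 0x0 with 16KiB pages but
  // to 0x3000 with 4KiB pages; the former would clobber the preceding segment.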

  void* start =
      reinterpret_cast<void*>(__builtin_align_down(phdr->p_vaddr + load_bias_, kCompatPageSize));

  // The ELF may be loaded directly from a zipped APK;
  // the zip offset must be added to find the segment offset.
  const ElfW(Addr) offset = file_offset_ + __builtin_align_down(phdr->p_offset, kCompatPageSize);

  CHECK(should_use_16kib_app_compat_);

  // Since the 4KiB max-page-size ELF is not properly aligned, loading it by
  // directly mmapping the ELF file is not feasible.
  // Instead, read the ELF contents into the anonymous RW mapping.
  if (TEMP_FAILURE_RETRY(pread64(fd_, start, len, offset)) == -1) {
    DL_ERR("Compat loading: \"%s\" failed to read LOAD segment %zu: %m", name_.c_str(), seg_idx);
    return false;
  }

  return true;
}


static size_t phdr_table_get_relro_min_align(const ElfW(Phdr)* relro_phdr,
                                             const ElfW(Phdr)* phdr_table, size_t phdr_count) {
  for (size_t index = 0; index < phdr_count; ++index) {
    const ElfW(Phdr)* phdr = &phdr_table[index];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    // Only check the case where the RELRO segment is a prefix of a LOAD segment. Conventional
    // linkers will only generate binaries where the RELRO segment is either the prefix of the
    // first RW load segment, or is entirely contained in the first RW segment.
    if (phdr->p_vaddr == relro_phdr->p_vaddr) {
      // No extra alignment checks needed if the whole load segment is relro.
      if (phdr->p_memsz <= relro_phdr->p_memsz) {
        return 0;
      }

      ElfW(Addr) relro_end = relro_phdr->p_vaddr + relro_phdr->p_memsz;
      // Alignments must be powers of two, so the RELRO segment's alignment can be determined by
      // calculating its lowest set bit with (n & -n).
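      // e.g. relro_end == 0x7000: 0x7000 & -0x7000 == 0x1000, i.e. 4KiB alignment.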
      size_t relro_align = static_cast<size_t>(relro_end & -relro_end);
      // We only care about relro segments that are aligned to at least 4KiB. This is always
      // expected for outputs of a conventional linker.
      return relro_align >= kCompatPageSize ? relro_align : 0;
    }
  }
  return 0;
}

/*
 * If the base page size is 16KiB and the RELRO's end alignment is less than min_align_,
 * override min_align_ with the RELRO's end alignment. This ensures that the ELF is
 * loaded in compat mode even if the LOAD segments are 16KiB aligned.
 * Linker bug: https://sourceware.org/bugzilla/show_bug.cgi?id=28824
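 *
 * e.g. an ELF whose LOAD segments have a p_align of 16KiB, but whose RELRO prefix
 * ends at an address aligned only to 4KiB, gets min_align_ lowered to 4KiB so
 * that it is loaded in compat mode.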
 */
void ElfReader::FixMinAlignFor16KiB() {
  // A binary with LOAD segment alignments of at least 16KiB can still be incompatible with 16KiB
  // page sizes if the first RW segment has a RELRO prefix ending at a non-16KiB-aligned address.
  // We need to check for this possibility here and adjust min_align_ accordingly.
  // We only check if the ELF file contains a single RELRO segment, because that's what the 16KiB
  // compatibility loader can handle.
  const ElfW(Phdr)* relro_phdr = nullptr;
  if (HasAtMostOneRelroSegment(&relro_phdr) && relro_phdr != nullptr) {
    size_t relro_min_align = phdr_table_get_relro_min_align(relro_phdr, phdr_table_, phdr_num_);
    if (relro_min_align) {
      min_align_ = std::min(min_align_, relro_min_align);
    }
  }
}

/*
 * Apply RX or RWX protection to the code region of the ELF being loaded in
 * 16KiB compat mode.
 *
 * Input:
 *   start -> start address of the compat code region.
 *   size  -> size of the compat code region in bytes.
 *   should_16kib_app_compat_use_rwx -> use RWX or RX permission.
 *   note_gnu_property -> AArch64-only: use PROT_BTI if the ELF is BTI-compatible.
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int phdr_table_protect_16kib_app_compat_code(ElfW(Addr) start, ElfW(Addr) size,
                                             bool should_16kib_app_compat_use_rwx,
                                             const GnuPropertySection* note_gnu_property __unused) {
  int prot = PROT_READ | PROT_EXEC;
  if (should_16kib_app_compat_use_rwx) {
    prot |= PROT_WRITE;
  }
#ifdef __aarch64__
  if (note_gnu_property != nullptr && note_gnu_property->IsBTICompatible()) {
    prot |= PROT_BTI;
  }
#endif
  return mprotect(reinterpret_cast<void*>(start), size, prot);
}