Message ID | b37a0e40db013871109af6cba09f79ba92b9594d.1618301209.git.szabolcs.nagy@arm.com |
---|---|
State | New |
Headers | show |
Series | Dynamic TLS related data race fixes | expand |
On Tue, Apr 13, 2021 at 2:31 AM Szabolcs Nagy via Libc-alpha <libc-alpha@sourceware.org> wrote: > > Lazy tlsdesc relocation is racy because the static tls optimization and > tlsdesc management operations are done without holding the dlopen lock. > > This is similar to the commit b7cf203b5c17dd6d9878537d41e0c7cc3d270a67 > for aarch64, but it fixes a different race: bug 27137. > > Another issue is that ld auditing ignores DT_BIND_NOW and thus tries to > relocate tlsdesc lazily, but that does not work in a BIND_NOW module > due to missing DT_TLSDESC_PLT. Unconditionally relocating tlsdesc at > load time fixes this bug 27721 too. > > -- > v2: > - mention the ldaudit issue with bindnow and tlsdesc. > --- > sysdeps/x86_64/dl-machine.h | 19 ++++++++++++++----- > 1 file changed, 14 insertions(+), 5 deletions(-) > > diff --git a/sysdeps/x86_64/dl-machine.h b/sysdeps/x86_64/dl-machine.h > index 103eee6c3f..9a876a371e 100644 > --- a/sysdeps/x86_64/dl-machine.h > +++ b/sysdeps/x86_64/dl-machine.h > @@ -570,12 +570,21 @@ elf_machine_lazy_rel (struct link_map *map, > } > else if (__glibc_likely (r_type == R_X86_64_TLSDESC)) > { > - struct tlsdesc volatile * __attribute__((__unused__)) td = > - (struct tlsdesc volatile *)reloc_addr; > + const Elf_Symndx symndx = ELFW (R_SYM) (reloc->r_info); > + const ElfW (Sym) *symtab = (const void *)D_PTR (map, l_info[DT_SYMTAB]); > + const ElfW (Sym) *sym = &symtab[symndx]; > + const struct r_found_version *version = NULL; > > - td->arg = (void*)reloc; > - td->entry = (void*)(D_PTR (map, l_info[ADDRIDX (DT_TLSDESC_PLT)]) > - + map->l_addr); > + if (map->l_info[VERSYMIDX (DT_VERSYM)] != NULL) > + { > + const ElfW (Half) *vernum = > + (const void *)D_PTR (map, l_info[VERSYMIDX (DT_VERSYM)]); > + version = &map->l_versions[vernum[symndx] & 0x7fff]; > + } > + > + /* Always initialize TLS descriptors completely at load time, in > + case static TLS is allocated for it that requires locking. */ > + elf_machine_rela (map, reloc, sym, version, reloc_addr, skip_ifunc); > } > else if (__glibc_unlikely (r_type == R_X86_64_IRELATIVE)) > { > -- > 2.17.1 > LGTM. Thanks.
diff --git a/sysdeps/x86_64/dl-machine.h b/sysdeps/x86_64/dl-machine.h index 103eee6c3f..9a876a371e 100644 --- a/sysdeps/x86_64/dl-machine.h +++ b/sysdeps/x86_64/dl-machine.h @@ -570,12 +570,21 @@ elf_machine_lazy_rel (struct link_map *map, } else if (__glibc_likely (r_type == R_X86_64_TLSDESC)) { - struct tlsdesc volatile * __attribute__((__unused__)) td = - (struct tlsdesc volatile *)reloc_addr; + const Elf_Symndx symndx = ELFW (R_SYM) (reloc->r_info); + const ElfW (Sym) *symtab = (const void *)D_PTR (map, l_info[DT_SYMTAB]); + const ElfW (Sym) *sym = &symtab[symndx]; + const struct r_found_version *version = NULL; - td->arg = (void*)reloc; - td->entry = (void*)(D_PTR (map, l_info[ADDRIDX (DT_TLSDESC_PLT)]) - + map->l_addr); + if (map->l_info[VERSYMIDX (DT_VERSYM)] != NULL) + { + const ElfW (Half) *vernum = + (const void *)D_PTR (map, l_info[VERSYMIDX (DT_VERSYM)]); + version = &map->l_versions[vernum[symndx] & 0x7fff]; + } + + /* Always initialize TLS descriptors completely at load time, in + case static TLS is allocated for it that requires locking. */ + elf_machine_rela (map, reloc, sym, version, reloc_addr, skip_ifunc); } else if (__glibc_unlikely (r_type == R_X86_64_IRELATIVE)) {