Coverage for src/debputy/deb_packaging_support.py: 24%

836 statements  

« prev     ^ index     » next       coverage.py v7.8.2, created at 2026-02-26 22:30 +0000

1import collections 

2import contextlib 

3import dataclasses 

4import datetime 

5import functools 

6import hashlib 

7import itertools 

8import operator 

9import os 

10import re 

11import shutil 

12import subprocess 

13import tempfile 

14import textwrap 

15from contextlib import ExitStack, suppress 

16from tempfile import mkstemp 

17from typing import ( 

18 Literal, 

19 TypeVar, 

20 cast, 

21 Any, 

22 AbstractSet, 

23 TYPE_CHECKING, 

24) 

25from collections.abc import Iterable, Sequence, Iterator, Mapping 

26 

27import debian.deb822 

28from debian.changelog import Changelog 

29from debian.deb822 import Deb822 

30from debputy._deb_options_profiles import DebBuildOptionsAndProfiles 

31from debputy.architecture_support import DpkgArchitectureBuildProcessValuesTable 

32from debputy.elf_util import find_all_elf_files, ELF_MAGIC 

33from debputy.exceptions import DebputyDpkgGensymbolsError, PureVirtualPathError 

34from debputy.filesystem_scan import ( 

35 FSControlRootDir, 

36 VirtualPathBase, 

37 InMemoryVirtualPathBase, 

38) 

39from debputy.maintscript_snippet import ( 

40 ALL_CONTROL_SCRIPTS, 

41 MaintscriptSnippetContainer, 

42 STD_CONTROL_SCRIPTS, 

43) 

44from debputy.packager_provided_files import PackagerProvidedFile 

45from debputy.packages import BinaryPackage, SourcePackage 

46from debputy.packaging.alternatives import process_alternatives 

47from debputy.packaging.debconf_templates import process_debconf_templates 

48from debputy.packaging.makeshlibs import ( 

49 compute_shlibs, 

50 ShlibsContent, 

51 generate_shlib_dirs, 

52 resolve_reserved_provided_file, 

53) 

54from debputy.plugin.api.feature_set import PluginProvidedFeatureSet 

55from debputy.plugin.api.impl import ServiceRegistryImpl 

56from debputy.plugin.api.impl_types import ( 

57 MetadataOrMaintscriptDetector, 

58 PackageDataTable, 

59 ServiceManagerDetails, 

60) 

61from debputy.plugin.api.spec import ( 

62 FlushableSubstvars, 

63 VirtualPath, 

64 PackageProcessingContext, 

65 ServiceDefinition, 

66) 

67from debputy.plugins.debputy.binary_package_rules import ServiceRule 

68from debputy.util import ( 

69 _error, 

70 ensure_dir, 

71 assume_not_none, 

72 resolve_perl_config, 

73 perlxs_api_dependency, 

74 detect_fakeroot, 

75 grouper, 

76 _info, 

77 xargs, 

78 escape_shell, 

79 generated_content_dir, 

80 print_command, 

81 _warn, 

82) 

83 

84if TYPE_CHECKING: 

85 from debputy.highlevel_manifest import ( 

86 HighLevelManifest, 

87 PackageTransformationDefinition, 

88 BinaryPackageData, 

89 ) 

90 

91 

# Covariant type variable for helpers that accept any VirtualPath subtype.
VP = TypeVar("VP", bound=VirtualPath, covariant=True)

# Matches library package names like "libfoot64" / "libfoot64-nss"
# (presumably the 64-bit time_t transition packages — TODO confirm).
_T64_REGEX = re.compile("^lib.*t64(?:-nss)?$")
# Name of the substvar used for Provides of such packages.
_T64_PROVIDES = "t64:Provides"

96 

97 

def generate_md5sums_file(
    control_output_dir: VirtualPathBase,
    fs_root: VirtualPath,
) -> None:
    """Generate the DEBIAN/md5sums control file for the package contents.

    Conffiles (as listed in the already-generated ``conffiles`` control file)
    are excluded, since dpkg tracks their checksums separately. If there is
    nothing to checksum, no md5sums file is written at all.

    :param control_output_dir: The control-file directory (receives "md5sums").
    :param fs_root: Root of the package's data contents.
    """
    conffiles = control_output_dir.get("conffiles")
    exclude = set()
    if conffiles and conffiles.is_file:
        with conffiles.open() as fd:
            for line in fd:
                if not line.startswith("/"):
                    continue
                # conffiles entries are absolute; all_paths() uses "./..." form.
                exclude.add("." + line.rstrip("\n"))
    files_to_checksum = sorted(
        (
            path
            for path in fs_root.all_paths()
            if path.is_file and path.path not in exclude
        ),
        # Sort in the same order as dh_md5sums, which is not quite the same as dpkg/`all_paths()`
        # Compare `.../doc/...` vs `.../doc-base/...` if you want to see the difference between
        # the two approaches.
        key=lambda p: p.path,
    )
    if not files_to_checksum:
        return
    with control_output_dir.open_child("md5sums", "w") as md5fd:
        for member in files_to_checksum:
            path = member.path
            assert path.startswith("./")
            path = path[2:]
            file_hash = hashlib.md5()
            with member.open(byte_io=True) as f:
                while chunk := f.read(8192):
                    file_hash.update(chunk)
            # The md5sums format (as produced by md5sum/dh_md5sums) separates
            # the digest and the path with TWO spaces.
            md5fd.write(f"{file_hash.hexdigest()}  {path}\n")

133 

134 

def install_or_generate_conffiles(
    ctrl_root: InMemoryVirtualPathBase | FSControlRootDir,
    fs_root: VirtualPath,
    reserved_packager_provided_files: dict[str, list[PackagerProvidedFile]],
) -> None:
    """Install a packager-provided conffiles control file and register /etc files.

    A non-empty packager-provided "conffiles" file (if any) is installed into
    the control root. Afterwards, every regular file below /etc is registered
    as a conffile.
    """
    provided_conffiles = resolve_reserved_provided_file(
        "conffiles",
        reserved_packager_provided_files,
    )
    usable_provided_file = bool(
        provided_conffiles
        and provided_conffiles.is_file
        and provided_conffiles.size > 0
    )
    if usable_provided_file:
        ctrl_root.insert_file_from_fs_path(
            "conffiles",
            provided_conffiles.fs_path,
            mode=0o644,
            reference_path=provided_conffiles,
        )
    etc_dir = fs_root.lookup("etc")
    if etc_dir:
        # Register every regular file shipped below /etc.
        _add_conffiles(ctrl_root, (p for p in etc_dir.all_paths() if p.is_file))

158 

159 

# Bit flags for the kinds of perl code detected in a package.
PERL_DEP_PROGRAM = 1  # perl scripts / executables
PERL_DEP_INDEP_PM_MODULE = 2  # arch-independent *.pm modules (vendorlib)
PERL_DEP_XS_MODULE = 4  # compiled XS extensions (*.so)
PERL_DEP_ARCH_PM_MODULE = 8  # arch-dependent *.pm modules (vendorarch)
# Any bit outside PROGRAM/INDEP_PM (i.e. arch-specific content) makes a
# multi-arch ":any" qualified perl dependency unsuitable.
PERL_DEP_MA_ANY_INCOMPATIBLE_TYPES = ~(PERL_DEP_PROGRAM | PERL_DEP_INDEP_PM_MODULE)

165 

166 

@functools.lru_cache(2)  # In practice, param will be "perl" or "perl-base"
def _dpkg_perl_version(package: str) -> str:
    """Return the installed version of *package* as reported by ``dpkg -s``."""
    status_output = subprocess.check_output(["dpkg", "-s", package]).decode("utf-8")
    version = next(
        (
            line[8:].strip()
            for line in status_output.splitlines(keepends=False)
            if line.startswith("Version: ")
        ),
        None,
    )
    # dpkg -s always emits a Version field for an installed package.
    assert version is not None
    return version

181 

182 

def handle_perl_code(
    dctrl_bin: BinaryPackage,
    dpkg_architecture_variables: DpkgArchitectureBuildProcessValuesTable,
    fs_root: InMemoryVirtualPathBase,
    substvars: FlushableSubstvars,
) -> None:
    """Detect perl programs/modules in the package and emit perl:Depends.

    Scans the perl vendor include directories for *.pm/*.so modules and the
    rest of the tree for perl scripts, then adds the appropriate entries to
    the ``perl:Depends`` substvar. Also prunes empty vendor directories.
    """
    perl_config_data = resolve_perl_config(dpkg_architecture_variables, dctrl_bin)
    # Bitmask of PERL_DEP_* flags describing what was found.
    detected_dep_requirements = 0

    # MakeMaker always makes lib and share dirs, but typically only one directory is actually used.
    for perl_inc_dir in (perl_config_data.vendorarch, perl_config_data.vendorlib):
        p = fs_root.lookup(perl_inc_dir)
        if p and p.is_dir:
            p.prune_if_empty_dir()

    # FIXME: 80% of this belongs in a metadata detector, but that requires us to expose .walk() in the public API,
    # which will not be today.
    # Classify modules: vendorlib => arch-indep modules, vendorarch => arch-dep
    # modules; any *.so in either is an XS extension.
    for d, pm_mode in [
        (perl_config_data.vendorlib, PERL_DEP_INDEP_PM_MODULE),
        (perl_config_data.vendorarch, PERL_DEP_ARCH_PM_MODULE),
    ]:
        inc_dir = fs_root.lookup(d)
        if not inc_dir:
            continue
        for path in inc_dir.all_paths():
            if not path.is_file:
                continue
            if path.name.endswith(".so"):
                detected_dep_requirements |= PERL_DEP_XS_MODULE
            elif path.name.endswith(".pm"):
                detected_dep_requirements |= pm_mode

    # Look for perl scripts anywhere else (executable files or *.pl), by
    # checking the interpreter of the shebang line.
    for path, children in fs_root.walk():
        if path.path == "./usr/share/doc":
            # Skip documentation (examples etc.) entirely.
            children.clear()
            continue
        if (
            not path.is_file
            or not path.has_fs_path
            or not (path.is_executable or path.name.endswith(".pl"))
        ):
            continue

        interpreter = path.interpreter()
        if interpreter is not None and interpreter.command_full_basename == "perl":
            detected_dep_requirements |= PERL_DEP_PROGRAM

    if not detected_dep_requirements:
        return
    dpackage = "perl"
    # FIXME: Currently, dh_perl supports perl-base via manual toggle.

    dependency = dpackage
    # Pure scripts/arch-indep modules can depend on "perl:any" (multi-arch).
    if not (detected_dep_requirements & PERL_DEP_MA_ANY_INCOMPATIBLE_TYPES):
        dependency += ":any"

    # XS modules need a minimum version matching the perl they were built for.
    if detected_dep_requirements & PERL_DEP_XS_MODULE:
        dpkg_version = _dpkg_perl_version(dpackage)
        dependency += f" (>= {dpkg_version})"
    substvars.add_dependency("perl:Depends", dependency)

    # Arch-specific modules additionally need the perl XS API dependency.
    if detected_dep_requirements & (PERL_DEP_XS_MODULE | PERL_DEP_ARCH_PM_MODULE):
        substvars.add_dependency("perl:Depends", perlxs_api_dependency())

246 

247 

def usr_local_transformation(dctrl: BinaryPackage, fs_root: VirtualPath) -> None:
    """Abort the build when the package ships anything under /usr/local."""
    usr_local_dir = fs_root.lookup("./usr/local")
    if not usr_local_dir:
        return
    if not any(usr_local_dir.iterdir()):
        return
    # There are two key issues:
    #  1) Getting the generated maintscript carried on to the final maintscript
    #  2) Making sure that manifest created directories do not trigger the "unused error".
    _error(
        f"Replacement of /usr/local paths is currently not supported in debputy (triggered by: {dctrl.name})."
    )

257 

258 

def _find_and_analyze_systemd_service_files(
    fs_root: VirtualPath,
    systemd_service_dir: Literal["system", "user"],
) -> Iterable[VirtualPath]:
    """Collect the systemd unit files of the given scope ("system" or "user").

    Looks in ./usr/lib/systemd/<scope> and the legacy ./lib/systemd/<scope>
    location, de-duplicating by basename (first location wins). Only regular
    files are returned; symlinks (unit aliases) are skipped.
    """
    service_dirs = [
        f"./usr/lib/systemd/{systemd_service_dir}",
        f"./lib/systemd/{systemd_service_dir}",
    ]
    seen = set()
    all_files = []

    for d in service_dirs:
        system_dir = fs_root.lookup(d)
        if not system_dir:
            continue
        for child in system_dir.iterdir():
            # Symlinks are aliases of other units, not units of their own.
            # (An earlier version collected them into an `aliases` mapping
            # that was never read; that dead bookkeeping has been removed.)
            if child.is_symlink:
                continue
            if child.is_file and child.name not in seen:
                seen.add(child.name)
                all_files.append(child)

    return all_files

284 

285 

def detect_systemd_user_service_files(
    dctrl: BinaryPackage,
    fs_root: VirtualPath,
) -> None:
    """Abort the build if the package ships systemd *user* unit files."""
    user_unit_files = _find_and_analyze_systemd_service_files(fs_root, "user")
    for service_file in user_unit_files:
        _error(
            f'Sorry, systemd user services files are not supported at the moment (saw "{service_file.path}"'
            f" in {dctrl.name})"
        )

295 

296 

# Generally, this should match the release date of oldstable or oldoldstable.
# Changelog entries older than this are candidates for trimming.
_DCH_PRUNE_CUT_OFF_DATE = datetime.date(2019, 7, 6)
# Minimum number of (non-binNMU) changelog entries kept even when trimming.
_DCH_MIN_NUM_OF_ENTRIES = 4

300 

301 

def _prune_dch_file(
    package: BinaryPackage,
    path: VirtualPath,
    is_changelog: bool,
    keep_versions: set[str] | None,
    *,
    trim: bool = True,
) -> tuple[bool, set[str] | None]:
    """Trim an installed changelog (or NEWS.Debian) file in place.

    For changelogs (``is_changelog=True``), old entries are dropped once at
    least ``_DCH_MIN_NUM_OF_ENTRIES`` entries are kept and the remaining ones
    are older than ``_DCH_PRUNE_CUT_OFF_DATE``; binNMU entries are moved into
    a separate per-architecture changelog file. For NEWS files, only entries
    whose versions appear in *keep_versions* are retained (and the file is
    deleted outright when nothing remains, see #1021607).

    :param package: The binary package the file belongs to.
    :param path: The installed changelog/NEWS file.
    :param is_changelog: True for the changelog, False for NEWS.Debian.
    :param keep_versions: For NEWS files, the versions to keep (the versions
        kept in the corresponding changelog). Must be None for changelogs.
    :param trim: When False, no date-based pruning occurs (binNMU entries are
        still split out).
    :return: ``(pruned, kept_versions)`` where *kept_versions* is the set of
        versions still present (changelog case only; None otherwise).
    """
    # TODO: Process `d/changelog` once
    # Note we cannot assume that changelog_file is always `d/changelog` as you can have
    # per-package changelogs.
    with path.open() as fd:
        dch = Changelog(fd)
    shortened = False
    important_entries = 0
    binnmu_entries = []
    if is_changelog:
        kept_entries = []
        for block in dch:
            if block.other_pairs.get("binary-only", "no") == "yes":
                # Always keep binNMU entries (they are always in the top) and they do not count
                # towards our kept_entries limit
                binnmu_entries.append(block)
                continue
            block_date = block.date
            if block_date is None:
                # _error does not return.
                _error("The Debian changelog was missing date in sign off line")
            try:
                entry_date = datetime.datetime.strptime(
                    block_date, "%a, %d %b %Y %H:%M:%S %z"
                ).date()
            except ValueError:
                # _error does not return, so entry_date is always bound below.
                _error(
                    f"Invalid date in the changelog entry for version {block.version}: {block_date!r} (Expected format: 'Thu, 26 Feb 2026 00:00:00 +0000')"
                )
            if (
                trim
                and entry_date < _DCH_PRUNE_CUT_OFF_DATE
                and important_entries >= _DCH_MIN_NUM_OF_ENTRIES
            ):
                # Entries are newest-first; everything from here on is older.
                shortened = True
                break
            # Match debhelper in incrementing after the check.
            important_entries += 1
            kept_entries.append(block)
    else:
        assert keep_versions is not None
        # The NEWS files should match the version for the dch to avoid lintian warnings.
        # If that means we remove all entries in the NEWS file, then we delete the NEWS
        # file (see #1021607)
        kept_entries = [b for b in dch if b.version in keep_versions]
        shortened = len(dch) > len(kept_entries)
        if shortened and not kept_entries:
            path.unlink()
            return True, None

    if not shortened and not binnmu_entries:
        # Nothing changed; leave the file untouched.
        return False, None

    parent_dir = assume_not_none(path.parent_dir)

    # Rewrite the file with only the kept entries.
    with (
        path.replace_fs_path_content() as fs_path,
        open(fs_path, "w", encoding="utf-8") as fd,
    ):
        for entry in kept_entries:
            fd.write(str(entry))

        if is_changelog and shortened:
            # For changelog (rather than NEWS) files, add a note about how to
            # get the full version.
            msg = textwrap.dedent(
                f"""\
                # Older entries have been removed from this changelog.
                # To read the complete changelog use `apt changelog {package.name}`.
                """
            )
            fd.write(msg)

    if binnmu_entries:
        if package.is_arch_all:
            _error(
                f"The package {package.name} is architecture all, but it is built during a binNMU. A binNMU build"
                " must not include architecture all packages"
            )

        # binNMU entries go into a separate "<name>.<arch>" changelog file.
        with (
            parent_dir.add_file(
                f"{path.name}.{package.resolved_architecture}"
            ) as binnmu_changelog,
            open(
                binnmu_changelog.fs_path,
                "w",
                encoding="utf-8",
            ) as binnmu_fd,
        ):
            for entry in binnmu_entries:
                binnmu_fd.write(str(entry))

    if not shortened:
        return False, None
    return True, {b.version for b in kept_entries}

404 

405 

def fixup_debian_changelog_and_news_file(
    dctrl: BinaryPackage,
    fs_root: VirtualPath,
    is_native: bool,
    build_env: DebBuildOptionsAndProfiles,
) -> None:
    """Normalize and trim the installed changelog and NEWS.Debian files.

    For native packages the changelog is (re)named plain "changelog". The
    changelog is pruned of old entries (unless DEB_BUILD_OPTIONS contains
    "notrimdch") and NEWS.Debian is reduced to the versions kept in the
    changelog.
    """
    doc_dir = fs_root.lookup(f"./usr/share/doc/{dctrl.name}")
    if not doc_dir:
        return
    changelog = doc_dir.get("changelog.Debian")
    if is_native:
        if changelog:
            # Native packages use the plain "changelog" name.
            changelog.name = "changelog"
        else:
            changelog = doc_dir.get("changelog")

    trim = "notrimdch" not in build_env.deb_build_options

    kept_versions = None
    changelog_was_pruned = False
    if changelog and changelog.has_fs_path:
        changelog_was_pruned, kept_versions = _prune_dch_file(
            dctrl, changelog, True, None, trim=trim
        )

    if not trim:
        return

    news_file = doc_dir.get("NEWS.Debian")
    if news_file and news_file.has_fs_path and changelog_was_pruned:
        _prune_dch_file(dctrl, news_file, False, kept_versions)

436 

437 

# Source-tree directories searched (in order) for an upstream changelog.
_UPSTREAM_CHANGELOG_SOURCE_DIRS = [
    ".",
    "doc",
    "docs",
]
# Maps a lower-cased candidate filename to its (suffix, basename) priority.
_UPSTREAM_CHANGELOG_NAMES = {
    # The value is a priority to match the debhelper order.
    # - The suffix weights heavier than the basename (because that is what debhelper did)
    #
    # We list the name/suffix in order of priority in the code. That makes it easier to
    # see the priority directly, but it gives the "lowest" value to the most important items
    f"{n}{s}": (sw, nw)
    for (nw, n), (sw, s) in itertools.product(
        enumerate(["changelog", "changes", "history"], start=1),
        enumerate(["", ".txt", ".md", ".rst", ".org"], start=1),
    )
}
# Fallback for `_detect_upstream_changelog` when nothing matched; only used
# as `min(..., default=...)`, so the priority part is never compared.
_NONE_TUPLE = (None, (0, 0))

456 

457 

def _detect_upstream_changelog(names: Iterable[str]) -> str | None:
    """Return the best upstream-changelog candidate among *names*, or None.

    Candidates are ranked via `_UPSTREAM_CHANGELOG_NAMES` (lower is better);
    unknown names are ignored.
    """
    candidates = [
        (name, priority)
        for name in names
        if (priority := _UPSTREAM_CHANGELOG_NAMES.get(name.lower())) is not None
    ]
    return min(candidates, default=_NONE_TUPLE, key=operator.itemgetter(1))[0]

465 

466 

def install_upstream_changelog(
    dctrl_bin: BinaryPackage,
    fs_root: InMemoryVirtualPathBase,
    source_fs_root: VirtualPath,
) -> None:
    """Ensure /usr/share/doc/<pkg>/changelog contains the upstream changelog.

    First prefers a changelog already installed into the package's doc dir
    (renaming the best candidate to "changelog" if needed); otherwise falls
    back to copying the best candidate found in the source tree.
    """
    doc_dir = f"./usr/share/doc/{dctrl_bin.name}"
    bdir = fs_root.lookup(doc_dir)
    if bdir and not bdir.is_dir:
        # "/usr/share/doc/foo -> bar" symlink. Avoid croaking on those per:
        # https://salsa.debian.org/debian/debputy/-/issues/49
        return

    if bdir:
        if bdir.get("changelog") or bdir.get("changelog.gz"):
            # Upstream's build system already provided the changelog with the correct name.
            # Accept that as the canonical one.
            return
        # Only consider non-empty regular files with a backing fs path.
        upstream_changelog = _detect_upstream_changelog(
            p.name for p in bdir.iterdir() if p.is_file and p.has_fs_path and p.size > 0
        )
        if upstream_changelog:
            p = bdir.lookup(upstream_changelog)
            assert p is not None  # Mostly as a typing hint
            p.name = "changelog"
            return
    # Nothing usable in the package; look in the source tree instead.
    for dirname in _UPSTREAM_CHANGELOG_SOURCE_DIRS:
        dir_path = source_fs_root.lookup(dirname)
        if not dir_path or not dir_path.is_dir:
            continue
        changelog_name = _detect_upstream_changelog(
            p.name
            for p in dir_path.iterdir()
            if p.is_file and p.has_fs_path and p.size > 0
        )
        if changelog_name:
            if bdir is None:
                bdir = fs_root.mkdirs(doc_dir)
            bdir.insert_file_from_fs_path(
                "changelog",
                dir_path[changelog_name].fs_path,
            )
            break

509 

510 

@dataclasses.dataclass(slots=True)
class _ElfInfo:
    """Per-ELF-file working state used while stripping and building dbgsym files."""

    # The package-internal view of the ELF file.
    path: VirtualPath
    # Writable filesystem path (obtained via `replace_fs_path_content()`).
    fs_path: str
    # Whether file(1) reported the binary as stripped; None until
    # `_resolve_build_ids` has run.
    is_stripped: bool | None = None
    # Hex build-id reported by file(1), if any.
    build_id: str | None = None
    # The extracted .debug file in the dbgsym tree, once created.
    dbgsym: InMemoryVirtualPathBase | None = None

518 

519 

def _elf_static_lib_walk_filter(
    fs_path: VirtualPath,
    children: list[VP],
) -> bool:
    """walk() filter excluding paths whose ELF content must not be stripped.

    Returns False (after pruning *children*) to skip the subtree entirely;
    otherwise may remove individual entries from *children* in place and
    returns True.
    """
    if (
        fs_path.name == ".build-id"
        and assume_not_none(fs_path.parent_dir).name == "debug"
    ):
        # Pre-existing split debug files under .../debug/.build-id: leave alone.
        children.clear()
        return False
    # Deal with some special cases, where certain files are not supposed to be stripped in a given directory
    # NOTE(review): `fs_path.name.endswith("debug/")` looks ineffective — a
    # basename normally cannot contain "/"; confirm whether `fs_path.path`
    # was intended.
    if "debug/" in fs_path.path or fs_path.name.endswith("debug/"):
        # FIXME: We need a way to opt out of this per #468333/#1016122
        for so_file in (f for f in list(children) if f.name.endswith(".so")):
            children.remove(so_file)
    if "/guile/" in fs_path.path or fs_path.name == "guile":
        # *.go files here are presumably Guile objects (not Go) — skip them.
        for go_file in (f for f in list(children) if f.name.endswith(".go")):
            children.remove(go_file)
    return True

539 

540 

@contextlib.contextmanager
def _all_elf_files(fs_root: VirtualPath) -> Iterator[dict[str, _ElfInfo]]:
    """Context manager yielding {writable fs path -> _ElfInfo} for all ELF files.

    While the context is active, each detected ELF file has been detached for
    in-place modification via `replace_fs_path_content()`; the replacement is
    committed when the context exits. Each `_ElfInfo` is pre-populated with
    is_stripped/build_id (via file(1)) before being handed to the caller.
    """
    all_elf_files = find_all_elf_files(
        fs_root,
        walk_filter=_elf_static_lib_walk_filter,
    )
    if not all_elf_files:
        # Yield an empty table so callers can always use `with`.
        yield {}
        return
    with ExitStack() as cm_stack:
        # Lazy pairing of (path, writable fs path); consumed by the dict
        # comprehension below, all within the ExitStack's lifetime.
        resolved = (
            (p, cm_stack.enter_context(p.replace_fs_path_content()))
            for p in all_elf_files
        )
        elf_info = {
            fs_path: _ElfInfo(
                # Re-resolve through fs_root; presumably needed to get the
                # current (attached) view after detaching — TODO confirm.
                path=assume_not_none(fs_root.lookup(detached_path.path)),
                fs_path=fs_path,
            )
            for detached_path, fs_path in resolved
        }
        _resolve_build_ids(elf_info)
        yield elf_info

564 

565 

def _find_all_static_libs(
    fs_root: InMemoryVirtualPathBase,
) -> Iterator[InMemoryVirtualPathBase]:
    """Yield paths that look like static libraries (ar archives holding ELF)."""
    for path, children in fs_root.walk():
        # Matching the logic of dh_strip for now.
        if not _elf_static_lib_walk_filter(path, children):
            continue
        if not path.is_file:
            continue
        basename = path.name
        if basename.startswith("lib") and basename.endswith("_g.a"):
            # _g.a are historically ignored. I do not remember why, but guessing the "_g" is
            # an encoding of gcc's -g parameter into the filename (with -g meaning "I want debug
            # symbols")
            continue
        if not path.has_fs_path:
            continue
        with path.open(byte_io=True) as archive:
            header = archive.read(8)
            if header not in (b"!<arch>\n", b"!<thin>\n"):
                continue
            # Maybe we should see if the first file looks like an index file.
            # Three random .a samples suggests the index file is named "/"
            # Not sure if we should skip past it and then do the ELF check or just assume
            # that "index => static lib".
            sample = archive.read(1024 * 1024)
            if b"\0" not in sample and ELF_MAGIC not in sample:
                continue
        yield path

594 

595 

@contextlib.contextmanager
def _all_static_libs(fs_root: InMemoryVirtualPathBase) -> Iterator[list[str]]:
    """Context manager yielding writable fs paths for all detected static libs.

    The paths come from `replace_fs_path_content()`; the replacements are
    committed when the context exits. Yields an empty list when there are no
    static libraries.
    """
    static_libs = list(_find_all_static_libs(fs_root))
    if not static_libs:
        yield []
        return
    with ExitStack() as stack:
        writable_paths: list[str] = [
            stack.enter_context(lib.replace_fs_path_content()) for lib in static_libs
        ]
        yield writable_paths

607 

608 

# Extracts the hex build-id from file(1) output, e.g. "BuildID[sha1]=abc123".
_FILE_BUILD_ID_RE = re.compile(rb"BuildID(?:\[\S+\])?=([A-Fa-f0-9]+)")

610 

611 

def _resolve_build_ids(elf_info: dict[str, _ElfInfo]) -> None:
    """Populate is_stripped and build_id on every entry via file(1)."""
    base_cmd = ["file", "-00", "-N"]
    if detect_fakeroot():
        base_cmd.append("--no-sandbox")

    for cmd in xargs(base_cmd, (info.fs_path for info in elf_info.values())):
        _info(f"Looking up build-ids via: {escape_shell(*cmd)}")
        raw_output = subprocess.check_output(cmd)

        # With -00, records are NUL separated as "path\0verdict\0...".
        # Strip the trailing NUL so split() yields no empty tail element.
        records = raw_output.rstrip(b"\0").split(b"\0")

        for fs_path_b, verdict in grouper(records, 2, incomplete="strict"):
            info = elf_info[fs_path_b.decode("utf-8")]
            info.is_stripped = b"not stripped" not in verdict
            match = _FILE_BUILD_ID_RE.search(verdict)
            if match:
                info.build_id = match.group(1).decode("utf-8")

631 

632 

def _make_debug_file(
    objcopy: str,
    fs_path: str,
    build_id: str,
    dbgsym_fs_root: InMemoryVirtualPathBase,
) -> InMemoryVirtualPathBase:
    """Extract the debug sections of *fs_path* into the dbgsym tree.

    The .debug file is placed at usr/lib/debug/.build-id/<xx>/<rest>.debug
    keyed by the ELF build-id. Binaries sharing a build-id share one .debug
    file.

    :param objcopy: The (possibly cross-prefixed) objcopy command.
    :param fs_path: Filesystem path of the ELF binary to extract from.
    :param build_id: The binary's hex build-id.
    :param dbgsym_fs_root: Root of the dbgsym package's content tree.
    :return: The path of the created (or reused) .debug file.
    """
    dbgsym_dirname = f"./usr/lib/debug/.build-id/{build_id[0:2]}/"
    dbgsym_basename = f"{build_id[2:]}.debug"
    dbgsym_dir = dbgsym_fs_root.mkdirs(dbgsym_dirname)
    if dbgsym_basename in dbgsym_dir:
        # Already extracted for another binary with the same build-id.
        return dbgsym_dir[dbgsym_basename]
    # objcopy is a pain and includes the basename verbatim when you do `--add-gnu-debuglink` without having an option
    # to overwrite the physical basename. So we have to ensure that the physical basename matches the installed
    # basename.
    with dbgsym_dir.add_file(
        dbgsym_basename,
        unlink_if_exists=False,
        fs_basename_matters=True,
        subdir_key="dbgsym-build-ids",
    ) as dbgsym:
        try:
            subprocess.check_call(
                [
                    objcopy,
                    "--only-keep-debug",
                    "--compress-debug-sections",
                    fs_path,
                    dbgsym.fs_path,
                ]
            )
        except subprocess.CalledProcessError:
            # _error does not return.
            full_command = (
                f"{objcopy} --only-keep-debug --compress-debug-sections"
                f" {escape_shell(fs_path, dbgsym.fs_path)}"
            )
            _error(
                f"Attempting to create a .debug file failed. Please review the error message from {objcopy} to"
                f" understand what went wrong. Full command was: {full_command}"
            )
    return dbgsym

673 

674 

def _strip_binary(strip: str, options: list[str], paths: Iterable[str]) -> None:
    """Run the `strip` tool with *options* over *paths* (batched via xargs).

    A no-op when *paths* is empty (avoids spawning `strip` with no files).
    Exits via `_error` if any strip invocation fails.

    :param strip: The (possibly cross-prefixed) strip command.
    :param options: Options passed to every strip invocation.
    :param paths: Filesystem paths to strip.
    """
    # We assume the paths are obtained via `p.replace_fs_path_content()`,
    # which is the case at the time of writing and should remain so forever.
    it = iter(paths)
    first = next(it, None)
    if first is None:
        return
    static_cmd = [strip]
    static_cmd.extend(options)

    # Re-attach the peeked-at first element; the remaining iterator is
    # chained directly (no need for an extra generator wrapper).
    for cmd in xargs(static_cmd, itertools.chain((first,), it)):
        _info(f"Removing unnecessary ELF debug info via: {escape_shell(*cmd)}")
        try:
            subprocess.check_call(
                cmd,
                stdin=subprocess.DEVNULL,
                restore_signals=True,
            )
        except subprocess.CalledProcessError:
            _error(
                f"Attempting to remove ELF debug info failed. Please review the error from {strip} above"
                f" to understand what went wrong."
            )

698 

699 

def _attach_debug(
    objcopy: str, elf_binary: VirtualPath, dbgsym: InMemoryVirtualPathBase
) -> None:
    """Attach a .gnu_debuglink section pointing at *dbgsym* to *elf_binary*.

    Exits via `_error` if objcopy fails.
    """
    dbgsym_fs_path: str
    with dbgsym.replace_fs_path_content() as dbgsym_fs_path:
        cmd = [objcopy, "--add-gnu-debuglink", dbgsym_fs_path, elf_binary.fs_path]
        print_command(*cmd)
        try:
            subprocess.check_call(cmd)
        except subprocess.CalledProcessError:
            _error(
                f"Attempting to attach ELF debug link to ELF binary failed. Please review the error from {objcopy}"
                f" above to understand what went wrong."
            )

714 

715 

716@functools.lru_cache 

717def _has_tool(tool: str) -> bool: 

718 return shutil.which(tool) is not None 

719 

720 

def _run_dwz(
    dctrl: BinaryPackage,
    dbgsym_fs_root: InMemoryVirtualPathBase,
    unstripped_elf_info: list[_ElfInfo],
) -> None:
    """Deduplicate DWARF data across unstripped ELF files via dwz(1).

    Skipped when there is nothing to process, for udebs, or when dwz is not
    installed. With more than one ELF file, shared DWARF data is moved into a
    multifile that is shipped in the dbgsym package.
    """
    if not unstripped_elf_info or dctrl.is_udeb or not _has_tool("dwz"):
        return
    dwz_cmd = ["dwz"]
    dwz_ma_dir_name = f"usr/lib/debug/.dwz/{dctrl.deb_multiarch}"
    dwz_ma_basename = f"{dctrl.name}.debug"
    multifile = f"{dwz_ma_dir_name}/{dwz_ma_basename}"
    build_time_multifile = None
    if len(unstripped_elf_info) > 1:
        fs_content_dir = generated_content_dir()
        fd, build_time_multifile = mkstemp(suffix=dwz_ma_basename, dir=fs_content_dir)
        # Only the path is needed; dwz (re)writes the file itself.
        os.close(fd)
        # -m: where dwz writes the multifile; -M: the (installed) name the
        # debug links will reference.
        dwz_cmd.append(f"-m{build_time_multifile}")
        dwz_cmd.append(f"-M/{multifile}")

    # TODO: configuration for disabling multi-file and tweaking memory limits

    dwz_cmd.extend(e.fs_path for e in unstripped_elf_info)

    _info(f"Deduplicating ELF debug info via: {escape_shell(*dwz_cmd)}")
    try:
        subprocess.check_call(dwz_cmd)
    except subprocess.CalledProcessError:
        _error(
            "Attempting to deduplicate ELF info via dwz failed. Please review the output from dwz above"
            " to understand what went wrong."
        )
    # A zero-sized multifile means dwz found nothing worth sharing; only
    # install it into the dbgsym tree when it has content.
    if build_time_multifile is not None and os.stat(build_time_multifile).st_size > 0:
        dwz_dir = dbgsym_fs_root.mkdirs(dwz_ma_dir_name)
        dwz_dir.insert_file_from_fs_path(
            dwz_ma_basename,
            build_time_multifile,
            mode=0o644,
            require_copy_on_write=False,
            follow_symlinks=False,
        )

761 

762 

def relocate_dwarves_into_dbgsym_packages(
    dctrl: BinaryPackage,
    package_fs_root: InMemoryVirtualPathBase,
    dbgsym_fs_root: VirtualPath,
    *,
    run_dwz: bool = False,
) -> list[str]:
    """Strip the package's binaries and move debug info into the dbgsym tree.

    Static libraries get a debug strip; ELF binaries get their debug sections
    extracted into build-id keyed .debug files (optionally after dwz
    deduplication), are stripped, and receive a .gnu_debuglink back-reference.

    :param dctrl: The binary package being processed.
    :param package_fs_root: Content root of the package (modified in place).
    :param dbgsym_fs_root: Content root of the dbgsym package (receives the
        .debug files and a doc-dir symlink).
    :param run_dwz: Whether to run dwz before extracting debug files.
    :return: Sorted, de-duplicated build-ids of the extracted debug files
        (empty when there were no ELF files).
    """
    # FIXME: hardlinks
    with _all_static_libs(package_fs_root) as all_static_files:
        if all_static_files:
            strip = dctrl.cross_command("strip")
            # Same debug-strip option set debhelper uses for static archives
            # (including LTO section/symbol removal).
            _strip_binary(
                strip,
                [
                    "--strip-debug",
                    "--remove-section=.comment",
                    "--remove-section=.note",
                    "--enable-deterministic-archives",
                    "-R",
                    ".gnu.lto_*",
                    "-R",
                    ".gnu.debuglto_*",
                    "-N",
                    "__gnu_lto_slim",
                    "-N",
                    "__gnu_lto_v1",
                ],
                all_static_files,
            )

    with _all_elf_files(package_fs_root) as all_elf_files:
        if not all_elf_files:
            return []
        objcopy = dctrl.cross_command("objcopy")
        strip = dctrl.cross_command("strip")
        unstripped_elf_info = list(
            e for e in all_elf_files.values() if not e.is_stripped
        )

        if run_dwz:
            _run_dwz(dctrl, dbgsym_fs_root, unstripped_elf_info)

        # Extract debug sections BEFORE stripping the binaries.
        for elf_info in unstripped_elf_info:
            elf_info.dbgsym = _make_debug_file(
                objcopy,
                elf_info.fs_path,
                assume_not_none(elf_info.build_id),
                dbgsym_fs_root,
            )

        # Note: When run strip, we do so also on already stripped ELF binaries because that is what debhelper does!
        # Executables (defined by mode)
        _strip_binary(
            strip,
            ["--remove-section=.comment", "--remove-section=.note"],
            (i.fs_path for i in all_elf_files.values() if i.path.is_executable),
        )

        # Libraries (defined by mode)
        _strip_binary(
            strip,
            ["--remove-section=.comment", "--remove-section=.note", "--strip-unneeded"],
            (i.fs_path for i in all_elf_files.values() if not i.path.is_executable),
        )

        # Link each stripped binary back to its .debug file.
        for elf_info in unstripped_elf_info:
            _attach_debug(
                objcopy,
                assume_not_none(elf_info.path),
                assume_not_none(elf_info.dbgsym),
            )

        # Set for uniqueness
        all_debug_info = sorted(
            {assume_not_none(i.build_id) for i in unstripped_elf_info}
        )

        # dbgsym packages document themselves via a symlink to the main
        # package's doc dir.
        dbgsym_doc_dir = dbgsym_fs_root.mkdirs("./usr/share/doc/")
        dbgsym_doc_dir.add_symlink(f"{dctrl.name}-dbgsym", dctrl.name)
        return all_debug_info

843 

844 

def run_package_processors(
    manifest: "HighLevelManifest",
    package_metadata_context: PackageProcessingContext,
    fs_root: VirtualPath,
) -> None:
    """Run every applicable plugin-provided package processor, in order."""
    binary_package = package_metadata_context.binary_package
    processors = manifest.plugin_provided_feature_set.package_processors_in_order()
    for processor in processors:
        if processor.applies_to(binary_package):
            processor.run_package_processor(fs_root, None, package_metadata_context)

856 

857 

def cross_package_control_files(
    package_data_table: PackageDataTable,
    manifest: "HighLevelManifest",
) -> None:
    """Compute shlibs data across all acted-on, arch-dependent packages.

    Seeds the combined shlibs with debian/shlibs.local (if present), runs
    `compute_shlibs` per package (collecting dpkg-gensymbols errors and only
    aborting after all packages have been tried), materializes a combined
    shlibs.local for dpkg-shlibdeps, and records the shlibs details on every
    package's control-file creator.
    """
    errors = []
    combined_shlibs = ShlibsContent()
    # Lazily created scratch dir for generated shlibs artifacts.
    shlibs_dir = None
    shlib_dirs: list[str] = []
    shlibs_local = manifest.debian_dir.get("shlibs.local")
    if shlibs_local and shlibs_local.is_file:
        with shlibs_local.open() as fd:
            combined_shlibs.add_entries_from_shlibs_file(fd)

    debputy_plugin_metadata = manifest.plugin_provided_feature_set.plugin_data[
        "debputy"
    ]

    # Pass 1: compute shlibs per package.
    for binary_package_data in package_data_table:
        binary_package = binary_package_data.binary_package
        if binary_package.is_arch_all or not binary_package.should_be_acted_on:
            continue
        fs_root = binary_package_data.fs_root
        package_state = manifest.package_state_for(binary_package.name)
        related_udeb_package = (
            binary_package_data.package_metadata_context.related_udeb_package
        )

        udeb_package_name = related_udeb_package.name if related_udeb_package else None
        ctrl = binary_package_data.ctrl_creator.for_plugin(
            debputy_plugin_metadata,
            "compute_shlibs",
        )
        try:
            soname_info_list = compute_shlibs(
                binary_package,
                binary_package_data.control_output_dir.fs_path,
                fs_root,
                manifest,
                udeb_package_name,
                ctrl,
                package_state.reserved_packager_provided_files,
                combined_shlibs,
            )
        except DebputyDpkgGensymbolsError as e:
            # Collect rather than abort: report all failing packages at once.
            errors.append(e.message)
        else:
            if soname_info_list:
                if shlibs_dir is None:
                    shlibs_dir = generated_content_dir(
                        subdir_key="_shlibs_materialization_dir"
                    )
                generate_shlib_dirs(
                    binary_package,
                    shlibs_dir,
                    soname_info_list,
                    shlib_dirs,
                )
    if errors:
        for error in errors:
            _warn(error)
        # _error does not return.
        _error("Stopping due to the errors above")

    # Materialize the combined shlibs file for dpkg-shlibdeps (if non-empty).
    generated_shlibs_local = None
    if combined_shlibs:
        if shlibs_dir is None:
            shlibs_dir = generated_content_dir(subdir_key="_shlibs_materialization_dir")
        generated_shlibs_local = os.path.join(shlibs_dir, "shlibs.local")
        with open(generated_shlibs_local, "w", encoding="utf-8") as fd:
            combined_shlibs.write_to(fd)
        _info(f"Generated {generated_shlibs_local} for dpkg-shlibdeps")

    # Pass 2: record the shlibs details on every acted-on package.
    for binary_package_data in package_data_table:
        binary_package = binary_package_data.binary_package
        if binary_package.is_arch_all or not binary_package.should_be_acted_on:
            continue
        binary_package_data.ctrl_creator.shlibs_details = (
            generated_shlibs_local,
            shlib_dirs,
        )

937 

938 

def _relevant_service_definitions(
    service_rule: ServiceRule,
    service_managers: list[str] | frozenset[str],
    by_service_manager_key: Mapping[
        tuple[str, str, str, str], tuple[ServiceManagerDetails, ServiceDefinition[Any]]
    ],
    aliases: Mapping[str, Sequence[tuple[str, str, str, str]]],
) -> Iterable[tuple[tuple[str, str, str, str], ServiceDefinition[Any]]]:
    """Yield every (key, definition) pair that *service_rule* applies to.

    Starting from the alias keys of the rule's service name, transitively
    follows the names of each matched definition, restricted to service
    managers the rule applies to. Exits via `_error` when no service manager
    detected a matching service at all.
    """
    pending_queue = {
        key
        for key in aliases[service_rule.service]
        if key in by_service_manager_key
        and service_rule.applies_to_service_manager(key[-1])
    }
    seen_keys: set[tuple[str, str, str, str]] = set()

    if not pending_queue:
        service_manager_names = ", ".join(sorted(service_managers))
        # _error does not return.
        _error(
            f"None of the service managers ({service_manager_names}) detected a service named"
            f" {service_rule.service} (type: {service_rule.type_of_service}, scope: {service_rule.service_scope}),"
            f" but the manifest definition at {service_rule.definition_source} requested that."
        )

    while pending_queue:
        next_key = pending_queue.pop()
        seen_keys.add(next_key)
        _, definition = by_service_manager_key[next_key]
        yield next_key, definition
        # Follow all names of this definition to pick up transitive aliases.
        for name in definition.names:
            for target_key in aliases[name]:
                if (
                    target_key not in seen_keys
                    # Keys must resolve in by_service_manager_key (mirrors the
                    # filter applied to the initial queue; without it a
                    # transitive alias could raise KeyError above).
                    and target_key in by_service_manager_key
                    and service_rule.applies_to_service_manager(target_key[-1])
                ):
                    pending_queue.add(target_key)

980 

981 

def handle_service_management(
    binary_package_data: "BinaryPackageData",
    manifest: "HighLevelManifest",
    package_metadata_context: PackageProcessingContext,
    fs_root: VirtualPath,
    feature_set: PluginProvidedFeatureSet,
) -> None:
    """Detect services in the package and apply the manifest's service rules.

    Runs every plugin-provided service detector over ``fs_root``, indexes the
    detected definitions by (name, type, scope, service manager), applies the
    package's requested service rules to the matching definitions, and finally
    hands the (possibly rule-adjusted) definitions to each service manager's
    integrator so it can emit maintscript snippets / control files.
    """

    # Index of (name, type, scope, manager) -> (manager details, definition).
    by_service_manager_key = {}
    # Every name (including aliases) -> all keys known under that name.
    aliases_by_name = collections.defaultdict(list)

    state = manifest.package_state_for(binary_package_data.binary_package.name)
    all_service_managers = list(feature_set.service_managers)
    requested_service_rules = state.requested_service_rules
    # Validate up front that every explicitly named service manager exists.
    for requested_service_rule in requested_service_rules:
        if not requested_service_rule.service_managers:
            continue
        for manager in requested_service_rule.service_managers:
            if manager not in feature_set.service_managers:
                # FIXME: Missing definition source; move to parsing.
                _error(
                    f"Unknown service manager {manager} used at {requested_service_rule.definition_source}"
                )

    # Run each service manager's detector and collect its detected services.
    for service_manager_details in feature_set.service_managers.values():
        service_registry: ServiceRegistryImpl = ServiceRegistryImpl(
            service_manager_details
        )
        service_manager_details.service_detector(
            fs_root,
            service_registry,
            package_metadata_context,
        )

        service_definitions = service_registry.detected_services
        if not service_definitions:
            continue

        for plugin_provided_definition in service_definitions:
            key = (
                plugin_provided_definition.name,
                plugin_provided_definition.type_of_service,
                plugin_provided_definition.service_scope,
                service_manager_details.service_manager,
            )
            by_service_manager_key[key] = (
                service_manager_details,
                plugin_provided_definition,
            )

            for name in plugin_provided_definition.names:
                aliases_by_name[name].append(key)

    # Apply each manifest rule to all definitions it matches (transitively
    # via aliases; see _relevant_service_definitions).
    for requested_service_rule in requested_service_rules:
        explicit_service_managers = requested_service_rule.service_managers is not None
        related_service_managers = (
            requested_service_rule.service_managers or all_service_managers
        )
        seen_service_managers = set()
        for service_key, service_definition in _relevant_service_definitions(
            requested_service_rule,
            related_service_managers,
            by_service_manager_key,
            aliases_by_name,
        ):
            sm = service_key[-1]
            seen_service_managers.add(sm)
            # Replace the stored definition with the rule-adjusted one.
            by_service_manager_key[service_key] = (
                by_service_manager_key[service_key][0],
                requested_service_rule.apply_to_service_definition(service_definition),
            )
        # NOTE(review): `related_service_managers` can be a list here (the
        # annotation on _relevant_service_definitions allows list|frozenset);
        # the `!=` and `-` below assume a set-like value. Confirm explicit
        # `service_managers` is always a frozenset.
        if (
            explicit_service_managers
            and seen_service_managers != related_service_managers
        ):
            missing_sms = ", ".join(
                sorted(related_service_managers - seen_service_managers)
            )
            _error(
                f"The rule {requested_service_rule.definition_source} explicitly requested which service managers"
                f" it should apply to. However, the following service managers did not provide a service of that"
                f" name, type and scope: {missing_sms}. Please check the rule is correct and either provide the"
                f" missing service or update the definition match the relevant services."
            )

    # Group the final definitions per service manager for integration.
    per_service_manager = {}

    for (
        service_manager_details,
        plugin_provided_definition,
    ) in by_service_manager_key.values():
        service_manager = service_manager_details.service_manager
        if service_manager not in per_service_manager:
            per_service_manager[service_manager] = (
                service_manager_details,
                [plugin_provided_definition],
            )
        else:
            per_service_manager[service_manager][1].append(plugin_provided_definition)

    # Let each service manager's integrator emit its output for the package.
    for (
        service_manager_details,
        final_service_definitions,
    ) in per_service_manager.values():
        ctrl = binary_package_data.ctrl_creator.for_plugin(
            service_manager_details.plugin_metadata,
            service_manager_details.service_manager,
            default_snippet_order="service",
        )
        _info(f"Applying {final_service_definitions}")
        service_manager_details.service_integrator(
            final_service_definitions,
            ctrl,
            package_metadata_context,
        )

1097 

1098 

def setup_control_files(
    binary_package_data: "BinaryPackageData",
    manifest: "HighLevelManifest",
    dbgsym_fs_root: VirtualPath,
    dbgsym_ids: list[str],
    package_metadata_context: PackageProcessingContext,
    *,
    allow_ctrl_file_management: bool = True,
) -> None:
    """Populate the DEBIAN/ control directory for a binary package.

    Depending on ``allow_ctrl_file_management``, either generate all control
    files (maintscripts, triggers, conffiles, etc.) from the manifest and
    plugin detectors, or — in integration modes where dh_installdeb has
    already run — validate that no debputy-generated snippets/triggers exist
    and copy the debhelper-staged control files instead. Finally delegates to
    `_generate_control_files` for DEBIAN/control (and md5sums).
    """
    binary_package = package_metadata_context.binary_package
    control_output_dir = binary_package_data.control_output_dir
    control_output_fs_path = control_output_dir.fs_path
    fs_root = binary_package_data.fs_root
    package_state = manifest.package_state_for(binary_package.name)

    feature_set: PluginProvidedFeatureSet = manifest.plugin_provided_feature_set
    metadata_maintscript_detectors = feature_set.metadata_maintscript_detectors
    substvars = binary_package_data.substvars

    snippets = STD_CONTROL_SCRIPTS
    generated_triggers = list(binary_package_data.ctrl_creator.generated_triggers())

    if binary_package.is_udeb:
        # FIXME: Add missing udeb scripts
        snippets = ["postinst"]

    if allow_ctrl_file_management:
        # Full management: alternatives, debconf templates, service
        # integration and all plugin metadata detectors run here.
        process_alternatives(
            binary_package,
            fs_root,
            package_state.reserved_packager_provided_files,
            package_state.maintscript_snippets,
            substvars,
        )
        process_debconf_templates(
            binary_package,
            package_state.reserved_packager_provided_files,
            package_state.maintscript_snippets,
            substvars,
            control_output_fs_path,
        )

        handle_service_management(
            binary_package_data,
            manifest,
            package_metadata_context,
            fs_root,
            feature_set,
        )

        plugin_detector_definition: MetadataOrMaintscriptDetector
        for plugin_detector_definition in itertools.chain.from_iterable(
            metadata_maintscript_detectors.values()
        ):
            if not plugin_detector_definition.applies_to(binary_package):
                continue
            ctrl = binary_package_data.ctrl_creator.for_plugin(
                plugin_detector_definition.plugin_metadata,
                plugin_detector_definition.detector_id,
            )
            plugin_detector_definition.run_detector(
                fs_root, ctrl, package_metadata_context
            )

        # Materialize the collected snippets into actual maintainer scripts.
        for script in snippets:
            _generate_snippet(
                control_output_fs_path,
                script,
                package_state.maintscript_snippets,
            )

    else:
        # Restricted mode: dh_installdeb has already produced control files;
        # any debputy-generated snippet or trigger would be silently lost, so
        # turn those into hard errors instead.
        state = manifest.package_state_for(binary_package_data.binary_package.name)
        if state.requested_service_rules:
            service_source = state.requested_service_rules[0].definition_source
            _error(
                f"Use of service definitions (such as {service_source}) is not supported in this integration mode"
            )
        for script, snippet_container in package_state.maintscript_snippets.items():
            for snippet in snippet_container.all_snippets():
                source = snippet.definition_source
                _error(
                    f"This integration mode cannot use maintscript snippets"
                    f' (since dh_installdeb has already been called). However, "{source}" triggered'
                    f" a snippet for {script}. Please remove the offending definition if it is from"
                    f" the manifest or file a bug if it is caused by a built-in rule."
                )

        for trigger in generated_triggers:
            source = f"{trigger.provider.plugin_name}:{trigger.provider_source_id}"
            _error(
                f"This integration mode must not generate triggers"
                f' (since dh_installdeb has already been called). However, "{source}" created'
                f" a trigger. Please remove the offending definition if it is from"
                f" the manifest or file a bug if it is caused by a built-in rule."
            )

        # Only the dpkg-shlibdeps detector still runs in this mode.
        shlibdeps_definition = [
            d
            for d in metadata_maintscript_detectors["debputy"]
            if d.detector_id == "dpkg-shlibdeps"
        ][0]

        ctrl = binary_package_data.ctrl_creator.for_plugin(
            shlibdeps_definition.plugin_metadata,
            shlibdeps_definition.detector_id,
        )
        shlibdeps_definition.run_detector(fs_root, ctrl, package_metadata_context)

        # Import the debhelper-staged control files (except control/md5sums,
        # which debputy regenerates itself below).
        dh_staging_dir = os.path.join("debian", binary_package.name, "DEBIAN")
        try:
            with os.scandir(dh_staging_dir) as it:
                existing_control_files = [
                    f.path
                    for f in it
                    if f.is_file(follow_symlinks=False)
                    and f.name not in ("control", "md5sums")
                ]
        except FileNotFoundError:
            existing_control_files = []

        if existing_control_files:
            cmd = ["cp", "-a"]
            cmd.extend(existing_control_files)
            cmd.append(control_output_fs_path)
            print_command(*cmd)
            subprocess.check_call(cmd)

    if binary_package.is_udeb:
        _generate_control_files(
            binary_package_data,
            package_state,
            control_output_dir,
            fs_root,
            substvars,
            # We never built udebs due to #797391, so skip over this information,
            # when creating the udeb
            None,
            None,
        )
        return

    if generated_triggers:
        assert allow_ctrl_file_management
        dest_file = os.path.join(control_output_fs_path, "triggers")
        # Append (mode "a") to preserve any pre-existing triggers file.
        with open(dest_file, "a", encoding="utf-8") as fd:
            fd.writelines(
                textwrap.dedent(
                    f"""\
                    # Added by {t.provider_source_id} from {t.provider.plugin_name}
                    {t.dpkg_trigger_type} {t.dpkg_trigger_target}
                    """
                )
                for t in generated_triggers
            )
            os.chmod(fd.fileno(), 0o644)

    if allow_ctrl_file_management:
        install_or_generate_conffiles(
            control_output_dir,
            fs_root,
            package_state.reserved_packager_provided_files,
        )

    _generate_control_files(
        binary_package_data,
        package_state,
        control_output_dir,
        fs_root,
        substvars,
        dbgsym_fs_root,
        dbgsym_ids,
    )

1272 

1273 

def _generate_snippet(
    control_output_dir: str,
    script: str,
    maintscript_snippets: dict[str, MaintscriptSnippetContainer],
) -> None:
    """Write the maintainer script ``script`` from its registered snippets.

    Emits ``<control_output_dir>/<script>`` with a ``#!/bin/sh`` header and
    mode 0755 when at least one non-empty snippet exists for the script;
    otherwise nothing is written.
    """
    container = maintscript_snippets.get(script)
    if container is None:
        return
    # Removal scripts run their parts in the opposite order of install scripts.
    is_removal_script = script in ("prerm", "postrm")
    parts = [
        container.generate_snippet(reverse=is_removal_script),
        container.generate_snippet(snippet_order="service", reverse=is_removal_script),
    ]
    if is_removal_script:
        parts = reversed(parts)
    body = "".join(f"{part}\n" for part in parts if part)
    if not body:
        return
    script_path = os.path.join(control_output_dir, script)
    with open(script_path, "w") as fd:
        fd.write("#!/bin/sh\nset -e\n\n")
        fd.write(body)
        os.chmod(fd.fileno(), 0o755)

1297 

1298 

def _add_conffiles(
    ctrl_root: VirtualPathBase,
    conffile_matches: Iterable[VirtualPath],
) -> None:
    """Append the given conffile paths to the package's ``conffiles`` file.

    The control file is only opened (and therefore only created) when at
    least one conffile match is provided.
    """
    matches = iter(conffile_matches)
    head = next(matches, None)
    if head is None:
        # No conffiles: avoid creating an empty "conffiles" control file.
        return
    with ctrl_root.open_child("conffiles", "at") as fd:
        for match in itertools.chain([head], matches):
            absolute_path = match.absolute
            assert match.is_file
            fd.write(f"{absolute_path}\n")

1313 

1314 

def _ensure_base_substvars_defined(substvars: FlushableSubstvars) -> None:
    """Define ``misc:Depends``/``misc:Pre-Depends`` as empty when unset."""
    for name in ("misc:Depends", "misc:Pre-Depends"):
        if name in substvars:
            continue
        substvars[name] = ""

1319 

1320 

def compute_installed_size(fs_root: VirtualPath) -> int:
    """Emulate dpkg-gencontrol's code for computing the default Installed-Size.

    Files and symlinks contribute their size rounded up to whole KiB; every
    other entry (directories, devices, ...) counts as 1 KiB. Hard links are
    only counted once (keyed by device/inode).
    """
    total_kb = 0
    seen_hard_links: set[tuple[int, int]] = set()
    for entry in fs_root.all_paths():
        if not (entry.is_symlink or entry.is_file):
            # Non-file entries are accounted for as a flat 1 KiB.
            total_kb += 1
            continue
        try:
            # If it is a VirtualPathBase instance, use its `.stat()` method
            # since it might have the stat cached as a minor optimization on
            # disk access. Other than that, `os.lstat` is sufficient.
            if isinstance(entry, VirtualPathBase):
                st = entry.stat()
            else:
                st = os.lstat(entry.fs_path)
            if st.st_nlink > 1:
                link_id = (st.st_dev, st.st_ino)
                if link_id in seen_hard_links:
                    # Already charged for this hard-linked inode.
                    continue
                seen_hard_links.add(link_id)
            byte_size = st.st_size
        except PureVirtualPathError:
            # We just assume it is not a hard link when the path is purely
            # virtual; use its virtual size instead.
            byte_size = entry.size
        total_kb += (byte_size + 1023) // 1024
    return total_kb

1349 

1350 

def _generate_dbgsym_control_file_if_relevant(
    binary_package: BinaryPackage,
    dbgsym_fs_root: VirtualPath,
    dbgsym_control_dir: FSControlRootDir,
    dbgsym_ids: str,
    multi_arch: str | None,
    dctrl: str,
    extra_common_params: Sequence[str],
) -> None:
    """Generate DEBIAN/control for the ``-dbgsym`` companion package.

    Most fields are derived from the parent package by passing ``-D``/``-U``
    overrides to dpkg-gencontrol rather than via a dedicated control stanza.

    :param binary_package: The parent binary package.
    :param dbgsym_fs_root: File system root of the dbgsym package contents.
    :param dbgsym_control_dir: Control root dir receiving the control file.
    :param dbgsym_ids: Space separated build-ids for the Build-Ids field.
    :param multi_arch: Effective Multi-Arch value of the parent (if any).
    :param dctrl: The debian/control (or generated replacement) to read.
    :param extra_common_params: dpkg-gencontrol parameters shared with the
      parent package.
    """
    section = binary_package.archive_section
    component = ""
    extra_params = []
    if section is not None and "/" in section and not section.startswith("main/"):
        # Use the archive area prefix (e.g. "contrib/") for the dbgsym
        # Section. Bug fix: this previously used the part *after* the slash
        # (the section), yielding e.g. "libs/debug" instead of "contrib/debug".
        component = section.split("/", 1)[0] + "/"
    if multi_arch != "same":
        extra_params.append("-UMulti-Arch")
    else:
        extra_params.append(f"-DMulti-Arch={multi_arch}")
    extra_params.append("-UReplaces")
    extra_params.append("-UBreaks")
    dbgsym_control_fs_path = dbgsym_control_dir.fs_path
    ensure_dir(dbgsym_control_fs_path)
    # Pass it via cmd-line to make it more visible that we are providing the
    # value. It also prevents the dbgsym package from picking up this value.
    total_size = compute_installed_size(dbgsym_fs_root) + compute_installed_size(
        dbgsym_control_dir
    )
    extra_params.append(f"-VInstalled-Size={total_size}")
    extra_params.extend(extra_common_params)

    package = binary_package.name
    # When a generated control file is used, it contains an explicit
    # "<pkg>-dbgsym" stanza; otherwise select the parent package's stanza.
    package_selector = (
        binary_package.name
        if dctrl == "debian/control"
        else f"{binary_package.name}-dbgsym"
    )
    dpkg_cmd = [
        "dpkg-gencontrol",
        f"-p{package_selector}",
        # FIXME: Support d/<pkg>.changelog at some point.
        "-ldebian/changelog",
        "-T/dev/null",
        f"-c{dctrl}",
        f"-O{dbgsym_control_fs_path}/control",
        # Use a placeholder for -P to ensure failure if we forgot to override a path parameter
        "-P/non-existent",
        f"-DPackage={package}-dbgsym",
        "-DDepends=" + package + " (= ${binary:Version})",
        f"-DDescription=debug symbols for {package}",
        f"-DSection={component}debug",
        f"-DBuild-Ids={dbgsym_ids}",
        "-UPre-Depends",
        "-URecommends",
        "-USuggests",
        "-UEnhances",
        "-UProvides",
        "-UEssential",
        "-UConflicts",
        "-DPriority=optional",
        "-UHomepage",
        "-UImportant",
        "-UBuilt-Using",
        "-UStatic-Built-Using",
        "-DAuto-Built-Package=debug-symbols",
        "-UProtected",
        *extra_params,
    ]
    print_command(*dpkg_cmd)
    try:
        subprocess.check_call(dpkg_cmd)
    except subprocess.CalledProcessError:
        _error(
            f"Attempting to generate DEBIAN/control file for {package}-dbgsym failed. Please review the output from"
            " dpkg-gencontrol above to understand what went wrong."
        )
    os.chmod(os.path.join(dbgsym_control_fs_path, "control"), 0o644)

1427 

1428 

1429def _all_parent_directories_of(directories: Iterable[str]) -> set[str]: 

1430 result = {"."} 

1431 for path in directories: 

1432 current = os.path.dirname(path) 

1433 while current and current not in result: 

1434 result.add(current) 

1435 current = os.path.dirname(current) 

1436 return result 

1437 

1438 

def _compute_multi_arch_for_arch_all_doc(
    binary_package: BinaryPackage,
    fs_root: InMemoryVirtualPathBase,
) -> str | None:
    """Return "foreign" for an arch:all doc package with only doc content.

    We limit by package name, since there are tricks involving a
    `Multi-Arch: no` depending on a `Multi-Arch: same` to emulate
    `Multi-Arch: allowed`. Said `Multi-Arch: no` can have no contents.
    That case seems unrealistic for -doc/-docs packages and accordingly
    the limitation here.
    """
    name = binary_package.name
    if not name.endswith(("-doc", "-docs")):
        return None
    no_descend_dirs = {
        "./usr/share/doc",
    }
    allowed_files = {f"./usr/share/lintian/overrides/{name}"}
    has_other_content = _any_unacceptable_paths(
        fs_root,
        acceptable_no_descend_paths=no_descend_dirs,
        acceptable_files=allowed_files,
    )
    return None if has_other_content else "foreign"

1460 

1461 

def _any_unacceptable_paths(
    fs_root: InMemoryVirtualPathBase,
    *,
    acceptable_no_descend_paths: list[str] | AbstractSet[str] = frozenset(),
    acceptable_files: list[str] | AbstractSet[str] = frozenset(),
) -> bool:
    """Check whether the tree contains anything outside the allow-lists.

    Returns True as soon as a path is found that is not an acceptable file,
    not an acceptable "no descend" directory (whose content is skipped), and
    not an intermediate directory on the way to one of those.
    """
    intermediate_dirs = _all_parent_directories_of(
        itertools.chain(acceptable_no_descend_paths, acceptable_files)
    )
    for current, children in fs_root.walk():
        current_path = current.path
        if current_path in acceptable_no_descend_paths:
            # Acceptable subtree: prune the walk so its content is not visited.
            children.clear()
        elif (
            current_path not in intermediate_dirs
            and current_path not in acceptable_files
        ):
            return True
    return False

1480 

1481 

def auto_compute_multi_arch(
    binary_package: BinaryPackage,
    control_output_dir: VirtualPath,
    fs_root: InMemoryVirtualPathBase,
) -> str | None:
    """Derive a safe ``Multi-Arch`` value from the package contents.

    Returns None when no value can be derived safely — for instance when the
    package has maintainer scripts, or when its contents fall outside the
    known-co-installable allow-lists.
    """
    # Any maintainer script makes the behavior unpredictable; bail out.
    for script in ALL_CONTROL_SCRIPTS:
        candidate = control_output_dir.get(script)
        if candidate is not None and candidate.is_file:
            return None

    resolved_arch = binary_package.resolved_architecture
    if resolved_arch == "all":
        return _compute_multi_arch_for_arch_all_doc(binary_package, fs_root)

    assert resolved_arch != "all"
    resolved_multiarch = binary_package.deb_multiarch
    no_descend_dirs = {
        f"./usr/lib/{resolved_multiarch}",
        f"./usr/include/{resolved_multiarch}",
    }
    allowed_files = {
        f"./usr/share/doc/{binary_package.name}/{basename}"
        for basename in (
            "copyright",
            "changelog.gz",
            "changelog.Debian.gz",
            f"changelog.Debian.{resolved_arch}.gz",
            "NEWS.Debian",
            "NEWS.Debian.gz",
            "README.Debian",
            "README.Debian.gz",
        )
    }

    # Note that the lintian-overrides file is deliberately omitted from the allow-list. We would have to know that the
    # override does not use architecture segments. With pure debputy, this is guaranteed (debputy
    # does not allow lintian-overrides with architecture segment). However, with a mixed debhelper + debputy,
    # `dh_lintian` allows it with compat 13 or older.

    if _any_unacceptable_paths(
        fs_root,
        acceptable_no_descend_paths=no_descend_dirs,
        acceptable_files=allowed_files,
    ):
        return None

    return "same"

1531 

1532 

@functools.lru_cache
def _has_t64_enabled() -> bool:
    """Check whether dpkg-buildflags reports the time64 ABI feature enabled."""
    try:
        raw = subprocess.check_output(
            ["dpkg-buildflags", "--query-features", "abi"]
        ).decode()
    except (subprocess.CalledProcessError, FileNotFoundError):
        # No usable dpkg-buildflags; treat the feature as disabled.
        return False

    return any(
        stanza.get("Feature") == "time64" and stanza.get("Enabled") == "yes"
        for stanza in Deb822.iter_paragraphs(raw)
    )

1546 

1547 

def _t64_migration_substvar(
    binary_package: BinaryPackage,
    control_output_dir: VirtualPath,
    substvars: FlushableSubstvars,
) -> None:
    """Set the t64 ``Provides`` substvar for the 64-bit time_t transition.

    Packages renamed as part of the time64 transition (matched via
    ``_T64_REGEX`` or an explicit ``X-Time64-Compat`` field) keep providing
    their pre-transition name on architectures where the ABI did not change.
    """
    name = binary_package.name
    compat_name = binary_package.fields.get("X-Time64-Compat")
    if compat_name is None and not _T64_REGEX.match(name):
        # Not a t64-renamed package and no explicit compat name: nothing to do.
        return

    # Only relevant for packages shipping shared libraries, detected via the
    # generated symbols/shlibs control files.
    if not any(
        p.is_file
        for n in ["symbols", "shlibs"]
        if (p := control_output_dir.get(n)) is not None
    ):
        return

    if compat_name is None:
        compat_name = name.replace("t64", "", 1)
        if compat_name == name:
            raise AssertionError(
                f"Failed to derive a t64 compat name for {name}. Please file a bug against debputy."
                " As a work around, you can explicitly provide a X-Time64-Compat header in debian/control"
                " where you specify the desired compat name."
            )

    arch_bits = binary_package.package_deb_architecture_variable("ARCH_BITS")

    if arch_bits != "32" or not _has_t64_enabled():
        # The ABI is unchanged here, so the renamed package can safely
        # provide (and satisfy dependencies on) its old name.
        # Bug fix: escape the braces so the substvar receives the literal
        # "${binary:Version}" placeholder instead of an f-string field that
        # would raise NameError at runtime.
        substvars.add_dependency(
            _T64_PROVIDES,
            f"{compat_name} (= ${{binary:Version}})",
        )
    elif _T64_PROVIDES not in substvars:
        substvars[_T64_PROVIDES] = ""

1583 

1584 

@functools.lru_cache
def dpkg_field_list_pkg_dep() -> Sequence[str]:
    """Return the package-relation field names known to the installed dpkg.

    Delegates to perl's Dpkg::Control::Fields so the list always matches the
    dpkg version on the build system.
    """
    cmd = [
        "perl",
        "-MDpkg::Control::Fields",
        "-e",
        r'print "$_\n" for field_list_pkg_dep',
    ]
    try:
        raw_output = subprocess.check_output(cmd)
    except (FileNotFoundError, subprocess.CalledProcessError):
        _error("Could not run perl -MDpkg::Control::Fields to get a list of fields")
    return raw_output.decode("utf-8").splitlines(keepends=False)

1599 

1600 

# Relation-style fields that debputy merges substvars into even though dpkg's
# own field_list_pkg_dep does not know them. They are emitted with an "XB-"
# prefix in the generated control stanza (see _handle_auto_substvars).
_SUBSTVARS_FIELDS_NOT_SUPPORTED_BY_DPKG = {
    "Commands",
}

1604 

1605 

@functools.lru_cache
def all_auto_substvars() -> Sequence[str]:
    """Return every field name whose substvars are merged automatically.

    This is dpkg's own list of package-relation fields plus the fields that
    dpkg itself does not support (``_SUBSTVARS_FIELDS_NOT_SUPPORTED_BY_DPKG``).
    """
    # `list(...)` instead of the redundant identity comprehension.
    result = list(dpkg_field_list_pkg_dep())
    result.extend(_SUBSTVARS_FIELDS_NOT_SUPPORTED_BY_DPKG)
    return tuple(result)

1611 

1612 

def _handle_auto_substvars(
    source: SourcePackage,
    dctrl_file: BinaryPackage,
    substvars: FlushableSubstvars,
    has_dbgsym: bool,
) -> str | None:
    """Merge auto-substvars into a generated replacement control file.

    When relation substvars need manual merging into the binary stanza (or a
    dbgsym stanza must be added to keep dpkg-gencontrol's substvars handling
    consistent), write a replacement ``debian/control`` with the merged
    fields and return its path. Returns None when the original
    ``debian/control`` can be used as-is.
    """
    auto_substvars_fields = all_auto_substvars()
    # Lowercase lookup so field matching is case-insensitive.
    auto_substvars_fields_lc = {x.lower(): x for x in auto_substvars_fields}
    # field (lowercased) -> set of "${substvar}" references to merge into it.
    substvar_fields = collections.defaultdict(set)
    needs_dbgsym_stanza = False
    for substvar_name, substvar in substvars.as_substvar.items():
        if ":" not in substvar_name:
            # Only namespaced substvars (e.g. "misc:Depends") participate.
            continue
        if substvar.assignment_operator in ("$=", "!="):
            # Will create incorrect results if there is a dbgsym and we do nothing
            needs_dbgsym_stanza = True

        if substvar.assignment_operator == "$=":
            # Automatically handled; no need for manual merging.
            continue
        _, field = substvar_name.rsplit(":", 1)
        field_lc = field.lower()
        if field_lc not in auto_substvars_fields_lc:
            continue
        substvar_fields[field_lc].add("${" + substvar_name + "}")

    if not has_dbgsym:
        needs_dbgsym_stanza = False

    if not substvar_fields and not needs_dbgsym_stanza:
        # Nothing to merge and no dbgsym stanza needed; keep debian/control.
        return None

    replacement_stanza = debian.deb822.Deb822(dctrl_file.fields)

    for field_name in auto_substvars_fields:
        field_name_lc = field_name.lower()
        addendum = substvar_fields.get(field_name_lc)
        if addendum is None:
            # No merging required
            continue
        # Sorted for deterministic output across runs.
        substvars_part = ", ".join(sorted(addendum))
        existing_value = replacement_stanza.get(field_name)

        if existing_value is None or existing_value.isspace():
            final_value = substvars_part
        else:
            # Strip a trailing comma before appending the substvar references.
            existing_value = existing_value.rstrip().rstrip(",")
            final_value = f"{existing_value}, {substvars_part}"
        replacement_stanza[field_name] = final_value
        canonical_field_name = auto_substvars_fields_lc.get(field_name_lc)
        # If `dpkg` does not know the field, we need to inject `XB-` in front
        # of it.
        if (
            canonical_field_name
            and canonical_field_name in _SUBSTVARS_FIELDS_NOT_SUPPORTED_BY_DPKG
        ):
            replacement_stanza[f"XB-{canonical_field_name}"] = replacement_stanza[
                field_name
            ]
            del replacement_stanza[field_name]

    # Keep Description as the last field (conventional stanza layout).
    with suppress(KeyError):
        replacement_stanza.order_last("Description")

    tmpdir = generated_content_dir(package=dctrl_file)
    # delete=False: the file must outlive this function; its path is returned
    # for dpkg-gencontrol to consume.
    with tempfile.NamedTemporaryFile(
        mode="wb",
        dir=tmpdir,
        suffix="__DEBIAN_control",
        delete=False,
    ) as fd:
        try:
            # Prefer the native dump if the fields object provides one.
            cast("Any", source.fields).dump(fd)
        except AttributeError:
            debian.deb822.Deb822(source.fields).dump(fd)
        fd.write(b"\n")
        replacement_stanza.dump(fd)

        if has_dbgsym:
            # Minimal stanza to avoid substvars warnings. Most fields are still set
            # via -D.
            dbgsym_stanza = Deb822()
            dbgsym_stanza["Package"] = f"{dctrl_file.name}-dbgsym"
            dbgsym_stanza["Architecture"] = dctrl_file.fields["Architecture"]
            dbgsym_stanza["Description"] = f"debug symbols for {dctrl_file.name}"
            fd.write(b"\n")
            dbgsym_stanza.dump(fd)

    return fd.name

1702 

1703 

def _generate_control_files(
    binary_package_data: "BinaryPackageData",
    package_state: "PackageTransformationDefinition",
    control_output_dir: FSControlRootDir,
    fs_root: InMemoryVirtualPathBase,
    substvars: FlushableSubstvars,
    dbgsym_root_fs: VirtualPath | None,
    dbgsym_build_ids: list[str] | None,
) -> None:
    """Generate DEBIAN/control (and md5sums) for the binary package.

    Computes Installed-Size and an auto Multi-Arch value where applicable,
    flushes the substvars, optionally generates the ``-dbgsym`` control file,
    and finally invokes dpkg-gencontrol for the package itself.

    :param dbgsym_root_fs: The dbgsym package contents, or None when no
      dbgsym data should be emitted (e.g. udebs).
    :param dbgsym_build_ids: Build-ids for the dbgsym package, if any.
    """
    binary_package = binary_package_data.binary_package
    source_package = binary_package_data.source_package
    package_name = binary_package.name
    extra_common_params = []
    extra_params_specific = []
    _ensure_base_substvars_defined(substvars)
    if "Installed-Size" not in substvars:
        # Pass it via cmd-line to make it more visible that we are providing the
        # value. It also prevents the dbgsym package from picking up this value.
        total_size = compute_installed_size(fs_root) + compute_installed_size(
            control_output_dir
        )
        extra_params_specific.append(f"-VInstalled-Size={total_size}")

    ma_value = binary_package.fields.get("Multi-Arch")
    if not binary_package.is_udeb and ma_value is None:
        # No explicit Multi-Arch field; try to derive one from the contents.
        ma_value = auto_compute_multi_arch(binary_package, control_output_dir, fs_root)
        if ma_value is not None:
            _info(
                f'The package "{binary_package.name}" looks like it should be "Multi-Arch: {ma_value}" based'
                ' on the contents and there is no explicit "Multi-Arch" field. Setting the Multi-Arch field'
                ' accordingly in the binary. If this auto-correction is wrong, please add "Multi-Arch: no" to the'
                ' relevant part of "debian/control" to disable this feature.'
            )
            # We want this to apply to the `-dbgsym` package as well to avoid
            # lintian `debug-package-for-multi-arch-same-pkg-not-coinstallable`
            extra_common_params.append(f"-DMulti-Arch={ma_value}")
    elif ma_value == "no":
        extra_common_params.append("-UMulti-Arch")

    dbgsym_ids = " ".join(dbgsym_build_ids) if dbgsym_build_ids else ""
    if package_state.binary_version is not None:
        extra_common_params.append(f"-v{package_state.binary_version}")

    _t64_migration_substvar(binary_package, control_output_dir, substvars)

    with substvars.flush() as flushed_substvars:
        # Only emit dbgsym data when the dbgsym tree actually contains files.
        has_dbgsym = dbgsym_root_fs is not None and any(
            f for f in dbgsym_root_fs.all_paths() if f.is_file
        )
        dctrl_file = _handle_auto_substvars(
            source_package,
            binary_package,
            substvars,
            has_dbgsym,
        )
        if dctrl_file is None:
            dctrl_file = "debian/control"

        if has_dbgsym:
            assert dbgsym_root_fs is not None  # mypy hint
            dbgsym_ctrl_dir = binary_package_data.dbgsym_info.dbgsym_ctrl_dir
            _generate_dbgsym_control_file_if_relevant(
                binary_package,
                dbgsym_root_fs,
                dbgsym_ctrl_dir,
                dbgsym_ids,
                ma_value,
                dctrl_file,
                extra_common_params,
            )
            generate_md5sums_file(
                dbgsym_ctrl_dir,
                dbgsym_root_fs,
            )
        elif dbgsym_ids:
            # No dbgsym package, but the build-ids still belong on the parent.
            extra_common_params.append(f"-DBuild-Ids={dbgsym_ids}")

        ctrl_file = os.path.join(control_output_dir.fs_path, "control")
        dpkg_cmd = [
            "dpkg-gencontrol",
            f"-p{package_name}",
            # FIXME: Support d/<pkg>.changelog at some point.
            "-ldebian/changelog",
            f"-c{dctrl_file}",
            f"-T{flushed_substvars}",
            f"-O{ctrl_file}",
            # Use a placeholder for -P to ensure failure if we forgot to override a path parameter
            "-P/non-existent",
            *extra_common_params,
            *extra_params_specific,
        ]
        print_command(*dpkg_cmd)
        try:
            subprocess.check_call(dpkg_cmd)
        except subprocess.CalledProcessError:
            # Bug fix: drop the stray leading space that produced a double
            # space ("from  dpkg-gencontrol") in the concatenated message.
            _error(
                f"Attempting to generate DEBIAN/control file for {package_name} failed. Please review the output from"
                " dpkg-gencontrol above to understand what went wrong."
            )
        os.chmod(ctrl_file, 0o644)

    if not binary_package.is_udeb:
        generate_md5sums_file(control_output_dir, fs_root)