//===-- WebAssemblyCFGSort.cpp - CFG Sorting ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements a CFG sorting pass.
///
/// This pass reorders the blocks in a function to put them into topological
/// order, ignoring loop backedges, and without any loop or exception being
/// interrupted by a block not dominated by its header, with special care
/// to keep the order as similar as possible to the original order.
///
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
#include "WebAssembly.h"
#include "WebAssemblyExceptionInfo.h"
#include "WebAssemblySubtarget.h"
#include "WebAssemblyUtilities.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

#define DEBUG_TYPE "wasm-cfg-sort"

// Option to disable EH pad first sorting. Only for testing unwind destination
// mismatches in CFGStackify.
static cl::opt<bool> WasmDisableEHPadSort(
    "wasm-disable-ehpad-sort", cl::ReallyHidden,
    cl::desc(
        "WebAssembly: Disable EH pad-first sort order. Testing purpose only."),
    cl::init(false));

namespace {

// Wrapper for loops and exceptions
class Region {
public:
  virtual ~Region() = default;
  virtual MachineBasicBlock *getHeader() const = 0;
  virtual bool contains(const MachineBasicBlock *MBB) const = 0;
  virtual unsigned getNumBlocks() const = 0;
  using block_iterator = typename ArrayRef<MachineBasicBlock *>::const_iterator;
  virtual iterator_range<block_iterator> blocks() const = 0;
  virtual bool isLoop() const = 0;
};

template <typename T> class ConcreteRegion : public Region {
  const T *Region;

public:
  ConcreteRegion(const T *Region) : Region(Region) {}
  MachineBasicBlock *getHeader() const override { return Region->getHeader(); }
  bool contains(const MachineBasicBlock *MBB) const override {
    return Region->contains(MBB);
  }
  unsigned getNumBlocks() const override { return Region->getNumBlocks(); }
  iterator_range<block_iterator> blocks() const override {
    return Region->blocks();
  }
  bool isLoop() const override { return false; }
};

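// Only MachineLoop regions are loops; the generic definition above returns
// false for WebAssemblyException regions.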
template <> bool ConcreteRegion<MachineLoop>::isLoop() const { return true; }

// This class has information of nested Regions; this is analogous to what
// LoopInfo is for loops.
class RegionInfo {
  const MachineLoopInfo &MLI;
  const WebAssemblyExceptionInfo &WEI;
  DenseMap<const MachineLoop *, std::unique_ptr<Region>> LoopMap;
  DenseMap<const WebAssemblyException *, std::unique_ptr<Region>> ExceptionMap;

public:
  RegionInfo(const MachineLoopInfo &MLI, const WebAssemblyExceptionInfo &WEI)
      : MLI(MLI), WEI(WEI) {}

  // Returns the smallest loop or exception that contains MBB
  const Region *getRegionFor(const MachineBasicBlock *MBB) {
    const auto *ML = MLI.getLoopFor(MBB);
    const auto *WE = WEI.getExceptionFor(MBB);
    if (!ML && !WE)
      return nullptr;
    // We determine subregion relationship by domination of their headers, i.e.,
    // if region A's header dominates region B's header, B is a subregion of A.
    // WebAssemblyException contains BBs in all its subregions (loops or
    // exceptions), but MachineLoop may not, because MachineLoop does not contain
    // BBs that don't have a path to its header even if they are dominated by
    // its header. So here we should use WE->contains(ML->getHeader()), but not
    // ML->contains(WE->getHeader()).
    if ((ML && !WE) || (ML && WE && WE->contains(ML->getHeader()))) {
      // If the smallest region containing MBB is a loop
      if (LoopMap.count(ML))
        return LoopMap[ML].get();
      LoopMap[ML] = std::make_unique<ConcreteRegion<MachineLoop>>(ML);
      return LoopMap[ML].get();
    } else {
      // If the smallest region containing MBB is an exception
      if (ExceptionMap.count(WE))
        return ExceptionMap[WE].get();
      ExceptionMap[WE] =
          std::make_unique<ConcreteRegion<WebAssemblyException>>(WE);
      return ExceptionMap[WE].get();
    }
  }
};

class WebAssemblyCFGSort final : public MachineFunctionPass {
  StringRef getPassName() const override { return "WebAssembly CFG Sort"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<MachineDominatorTree>();
    AU.addPreserved<MachineDominatorTree>();
    AU.addRequired<MachineLoopInfo>();
    AU.addPreserved<MachineLoopInfo>();
    AU.addRequired<WebAssemblyExceptionInfo>();
    AU.addPreserved<WebAssemblyExceptionInfo>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

public:
  static char ID; // Pass identification, replacement for typeid
  WebAssemblyCFGSort() : MachineFunctionPass(ID) {}
};
} // end anonymous namespace

char WebAssemblyCFGSort::ID = 0;
INITIALIZE_PASS(WebAssemblyCFGSort, DEBUG_TYPE,
                "Reorders blocks in topological order", false, false)

FunctionPass *llvm::createWebAssemblyCFGSort() {
  return new WebAssemblyCFGSort();
}

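// Fix up MBB's terminators now that the block laid out after it may have
// changed: recover the original layout successor from the block numbering
// assigned before sorting and let updateTerminator() insert or remove
// branches as needed. Blocks whose terminators analyzeBranch cannot handle
// must end in a barrier and need no fallthrough fixup.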
static void maybeUpdateTerminator(MachineBasicBlock *MBB) {
#ifndef NDEBUG
  bool AnyBarrier = false;
#endif
  bool AllAnalyzable = true;
  for (const MachineInstr &Term : MBB->terminators()) {
#ifndef NDEBUG
    AnyBarrier |= Term.isBarrier();
#endif
    AllAnalyzable &= Term.isBranch() && !Term.isIndirectBranch();
  }
  assert((AnyBarrier || AllAnalyzable) &&
         "analyzeBranch needs to analyze any block with a fallthrough");

  // Find the layout successor from the original block order.
  MachineFunction *MF = MBB->getParent();
  MachineBasicBlock *OriginalSuccessor =
      unsigned(MBB->getNumber() + 1) < MF->getNumBlockIDs()
          ? MF->getBlockNumbered(MBB->getNumber() + 1)
          : nullptr;

  if (AllAnalyzable)
    MBB->updateTerminator(OriginalSuccessor);
}

namespace {
// EH pads are selected first regardless of the block comparison order.
// When only one of the BBs is an EH pad, we give a higher priority to it, to
// prevent common mismatches between possibly throwing calls and ehpads they
// unwind to, as in the example below:
//
// bb0:
//   call @foo      // If this throws, unwind to bb2
// bb1:
//   call @bar      // If this throws, unwind to bb3
// bb2 (ehpad):
//   handler_bb2
// bb3 (ehpad):
//   handler_bb3
// continuing code
//
// Because this pass tries to preserve the original BB order, this order will
// not change. But it would produce the following try-catch structure in
// CFGStackify, resulting in a mismatch:
// try
//   try
//     call @foo
//     call @bar    // This should unwind to bb3, not bb2!
//   catch
//     handler_bb2
//   end
// catch
//   handler_bb3
// end
// continuing code
//
// If instead we give a higher priority to an EH pad whenever it is ready,
// then in this example, when both bb1 and bb2 are ready, we pick bb2 first.

/// Sort blocks by their number.
struct CompareBlockNumbers {
  bool operator()(const MachineBasicBlock *A,
                  const MachineBasicBlock *B) const {
    if (!WasmDisableEHPadSort) {
      if (A->isEHPad() && !B->isEHPad())
        return false;
      if (!A->isEHPad() && B->isEHPad())
        return true;
    }

    return A->getNumber() > B->getNumber();
  }
};
/// Sort blocks by their number in the opposite order.
struct CompareBlockNumbersBackwards {
  bool operator()(const MachineBasicBlock *A,
                  const MachineBasicBlock *B) const {
    if (!WasmDisableEHPadSort) {
      if (A->isEHPad() && !B->isEHPad())
        return false;
      if (!A->isEHPad() && B->isEHPad())
        return true;
    }

    return A->getNumber() < B->getNumber();
  }
};
/// Bookkeeping for a region to help ensure that we don't mix blocks not
/// dominated by its header among its blocks.
struct Entry {
  const Region *TheRegion;
  unsigned NumBlocksLeft;

  /// List of blocks not dominated by the region's header that are deferred
  /// until after all of the region's blocks have been seen.
  std::vector<MachineBasicBlock *> Deferred;

  explicit Entry(const class Region *R)
      : TheRegion(R), NumBlocksLeft(R->getNumBlocks()) {}
};
} // end anonymous namespace

/// Sort the blocks, taking special care to make sure that regions are not
/// interrupted by blocks not dominated by their header.
/// TODO: There are many opportunities for improving the heuristics here.
/// Explore them.
static void sortBlocks(MachineFunction &MF, const MachineLoopInfo &MLI,
                       const WebAssemblyExceptionInfo &WEI,
                       const MachineDominatorTree &MDT) {
  // Remember original layout ordering, so we can update terminators after
  // reordering to point to the original layout successor.
  MF.RenumberBlocks();

  // Prepare for a topological sort: Record the number of predecessors each
  // block has, ignoring loop backedges.
  SmallVector<unsigned, 16> NumPredsLeft(MF.getNumBlockIDs(), 0);
  for (MachineBasicBlock &MBB : MF) {
    unsigned N = MBB.pred_size();
    if (MachineLoop *L = MLI.getLoopFor(&MBB))
      if (L->getHeader() == &MBB)
        for (const MachineBasicBlock *Pred : MBB.predecessors())
          if (L->contains(Pred))
            --N;
    NumPredsLeft[MBB.getNumber()] = N;
  }

  // Topological sort the CFG, with additional constraints:
  //  - Between a region header and the last block in the region, there can be
  //    no blocks not dominated by its header.
  //  - It's desirable to preserve the original block order when possible.
  // We use two ready lists; Preferred and Ready. Preferred has recently
  // processed successors, to help preserve block sequences from the original
  // order. Ready has the remaining ready blocks. EH blocks are picked first
  // from both queues.
  PriorityQueue<MachineBasicBlock *, std::vector<MachineBasicBlock *>,
                CompareBlockNumbers>
      Preferred;
  PriorityQueue<MachineBasicBlock *, std::vector<MachineBasicBlock *>,
                CompareBlockNumbersBackwards>
      Ready;

  RegionInfo RI(MLI, WEI);
  SmallVector<Entry, 4> Entries;
  for (MachineBasicBlock *MBB = &MF.front();;) {
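    // MBB is the last block placed so far (initially the entry block); each
    // iteration picks the block to lay out immediately after it.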
    const Region *R = RI.getRegionFor(MBB);
    if (R) {
      // If MBB is a region header, add it to the active region list. We can't
      // put any blocks that it doesn't dominate until we see the end of the
      // region.
      if (R->getHeader() == MBB)
        Entries.push_back(Entry(R));
      // For each active region the block is in, decrement the count. If MBB is
      // the last block in an active region, take it off the list and pick up
      // any blocks deferred because the header didn't dominate them.
      for (Entry &E : Entries)
        if (E.TheRegion->contains(MBB) && --E.NumBlocksLeft == 0)
          for (auto DeferredBlock : E.Deferred)
            Ready.push(DeferredBlock);
      while (!Entries.empty() && Entries.back().NumBlocksLeft == 0)
        Entries.pop_back();
    }
    // The main topological sort logic.
    for (MachineBasicBlock *Succ : MBB->successors()) {
      // Ignore backedges.
      if (MachineLoop *SuccL = MLI.getLoopFor(Succ))
        if (SuccL->getHeader() == Succ && SuccL->contains(MBB))
          continue;
      // Decrement the predecessor count. If it's now zero, it's ready.
      if (--NumPredsLeft[Succ->getNumber()] == 0)
        Preferred.push(Succ);
    }
    // Determine the block to follow MBB. First try to find a preferred block,
    // to preserve the original block order when possible.
    MachineBasicBlock *Next = nullptr;
    while (!Preferred.empty()) {
      Next = Preferred.top();
      Preferred.pop();
      // If Next isn't dominated by the top active region header, defer it
      // until that region is done.
      if (!Entries.empty() &&
          !MDT.dominates(Entries.back().TheRegion->getHeader(), Next)) {
        Entries.back().Deferred.push_back(Next);
        Next = nullptr;
        continue;
      }
      // If Next was originally ordered before MBB, and that isn't simply
      // because it was loop-rotated above the header, it's not preferred.
      if (Next->getNumber() < MBB->getNumber() &&
          (WasmDisableEHPadSort || !Next->isEHPad()) &&
          (!R || !R->contains(Next) ||
           R->getHeader()->getNumber() < Next->getNumber())) {
        Ready.push(Next);
        Next = nullptr;
        continue;
      }
      break;
    }
    // If we didn't find a suitable block in the Preferred list, check the
    // general Ready list.
    if (!Next) {
      // If there are no more blocks to process, we're done.
      if (Ready.empty()) {
        maybeUpdateTerminator(MBB);
        break;
      }
      for (;;) {
        Next = Ready.top();
        Ready.pop();
        // If Next isn't dominated by the top active region header, defer it
        // until that region is done.
        if (!Entries.empty() &&
            !MDT.dominates(Entries.back().TheRegion->getHeader(), Next)) {
          Entries.back().Deferred.push_back(Next);
          continue;
        }
        break;
      }
    }
    // Move the next block into place and iterate.
    Next->moveAfter(MBB);
    maybeUpdateTerminator(MBB);
    MBB = Next;
  }
  assert(Entries.empty() && "Active sort region list not finished");
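  // Renumber so that block numbers match the new layout order; the
  // verification below compares predecessors and successors by number.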
  MF.RenumberBlocks();

#ifndef NDEBUG
  SmallSetVector<const Region *, 8> OnStack;

  // Insert a sentinel representing the degenerate loop that starts at the
  // function entry block and includes the entire function as a "loop" that
  // executes once.
  OnStack.insert(nullptr);

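  // Walk the blocks in their new order and check that every block's
  // predecessors come earlier in the order (loop backedges excepted) and that
  // each region's blocks form a contiguous, properly nested range, using
  // OnStack as a stack of the currently open regions.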
  for (auto &MBB : MF) {
    assert(MBB.getNumber() >= 0 && "Renumbered blocks should be non-negative.");
    const Region *Region = RI.getRegionFor(&MBB);

    if (Region && &MBB == Region->getHeader()) {
      // Region header.
      if (Region->isLoop()) {
        // Loop header. The loop predecessor should be sorted above, and the
        // other predecessors should be backedges below.
        for (auto Pred : MBB.predecessors())
          assert(
              (Pred->getNumber() < MBB.getNumber() || Region->contains(Pred)) &&
              "Loop header predecessors must be loop predecessors or "
              "backedges");
      } else {
        // Exception header. All predecessors should be sorted above.
        for (auto Pred : MBB.predecessors())
          assert(Pred->getNumber() < MBB.getNumber() &&
                 "Non-loop-header predecessors should be topologically sorted");
      }
      assert(OnStack.insert(Region) &&
             "Regions should be declared at most once.");

    } else {
      // Not a region header. All predecessors should be sorted above.
      for (auto Pred : MBB.predecessors())
        assert(Pred->getNumber() < MBB.getNumber() &&
               "Non-loop-header predecessors should be topologically sorted");
      assert(OnStack.count(RI.getRegionFor(&MBB)) &&
             "Blocks must be nested in their regions");
    }
    while (OnStack.size() > 1 && &MBB == WebAssembly::getBottom(OnStack.back()))
      OnStack.pop_back();
  }
  assert(OnStack.pop_back_val() == nullptr &&
         "The function entry block shouldn't actually be a region header");
  assert(OnStack.empty() &&
         "Control flow stack pushes and pops should be balanced.");
#endif
}

bool WebAssemblyCFGSort::runOnMachineFunction(MachineFunction &MF) {
  LLVM_DEBUG(dbgs() << "********** CFG Sorting **********\n"
                       "********** Function: "
                    << MF.getName() << '\n');

  const auto &MLI = getAnalysis<MachineLoopInfo>();
  const auto &WEI = getAnalysis<WebAssemblyExceptionInfo>();
  auto &MDT = getAnalysis<MachineDominatorTree>();
  // Liveness is not tracked for VALUE_STACK physreg.
  MF.getRegInfo().invalidateLiveness();

  // Sort the blocks, with contiguous sort regions.
  sortBlocks(MF, MLI, WEI, MDT);

  return true;
}