Source file src/cmd/compile/internal/ssa/tighten.go

     1  // Copyright 2015 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package ssa
     6  
     7  import "cmd/compile/internal/base"
     8  
     9  // tighten moves Values closer to the Blocks in which they are used.
    10  // This can reduce the amount of register spilling required,
    11  // if it doesn't also create more live values.
    12  // A Value can be moved to any block that
    13  // dominates all blocks in which it is used.
// tighten moves Values closer to the Blocks in which they are used.
// This can reduce the amount of register spilling required,
// if it doesn't also create more live values.
// A Value can be moved to any block that
// dominates all blocks in which it is used.
func tighten(f *Func) {
	if base.Flag.N != 0 && len(f.Blocks) < 10000 {
		// Skip the optimization in -N mode, except for huge functions.
		// Too many values live across blocks can cause pathological
		// behavior in the register allocator (see issue 52180).
		return
	}

	// canMove[v.ID] records whether v is a candidate for moving at all;
	// the per-iteration target computation below only considers candidates.
	canMove := f.Cache.allocBoolSlice(f.NumValues())
	defer f.Cache.freeBoolSlice(canMove)

	// Compute the memory states of each block.
	// startMem[b.ID]/endMem[b.ID] are the memory values live on entry/exit
	// of block b; used below to keep memory-taking values legal after a move.
	startMem := f.Cache.allocValueSlice(f.NumBlocks())
	defer f.Cache.freeValueSlice(startMem)
	endMem := f.Cache.allocValueSlice(f.NumBlocks())
	defer f.Cache.freeValueSlice(endMem)
	distinctArgs := f.newSparseSet(f.NumValues())
	defer f.retSparseSet(distinctArgs)
	memState(f, startMem, endMem)

	// Pass 1: mark which values may move.
	for _, b := range f.Blocks {
		for _, v := range b.Values {
			if v.Op.isLoweredGetClosurePtr() {
				// Must stay in the entry block.
				continue
			}
			switch v.Op {
			case OpPhi, OpArg, OpArgIntReg, OpArgFloatReg, OpSelect0, OpSelect1, OpSelectN:
				// Phis need to stay in their block.
				// Arg must stay in the entry block.
				// Tuple selectors must stay with the tuple generator.
				// SelectN is typically, ultimately, a register.
				continue
			}
			if opcodeTable[v.Op].nilCheck {
				// Nil checks need to stay in their block. See issue 72860.
				continue
			}
			// Count distinct arguments which will need a register.
			distinctArgs.clear()

			for _, a := range v.Args {
				// SP and SB are special registers and have no effect on
				// the allocation of general-purpose registers.
				if a.needRegister() && a.Op != OpSB && a.Op != OpSP {
					distinctArgs.add(a.ID)
				}
			}

			if distinctArgs.size() >= 2 && !v.Type.IsFlags() {
				// Don't move values with more than one input, as that may
				// increase register pressure.
				// We make an exception for flags, as we want flag generators
				// moved next to uses (because we only have 1 flag register).
				continue
			}
			canMove[v.ID] = true
		}
	}

	// Build data structure for fast least-common-ancestor queries.
	lca := makeLCArange(f)

	// For each moveable value, record the block that dominates all uses found so far.
	target := f.Cache.allocBlockSlice(f.NumValues())
	defer f.Cache.freeBlockSlice(target)

	// Grab loop information.
	// We use this to make sure we don't tighten a value into a (deeper) loop.
	idom := f.Idom()
	loops := f.loopnest()

	// Iterate to a fixed point: moving one value can enable moving its
	// arguments further on the next round.
	changed := true
	for changed {
		changed = false

		// Reset target
		clear(target)

		// Compute target locations (for moveable values only).
		// target location = the least common ancestor of all uses in the dominator tree.
		for _, b := range f.Blocks {
			for _, v := range b.Values {
				for i, a := range v.Args {
					if !canMove[a.ID] {
						continue
					}
					use := b
					if v.Op == OpPhi {
						// A phi uses its i-th argument at the end of the
						// i-th predecessor, not in the phi's own block.
						use = b.Preds[i].b
					}
					if target[a.ID] == nil {
						target[a.ID] = use
					} else {
						target[a.ID] = lca.find(target[a.ID], use)
					}
				}
			}
			// Control values (branch conditions etc.) are used by the block itself.
			for _, c := range b.ControlValues() {
				if !canMove[c.ID] {
					continue
				}
				if target[c.ID] == nil {
					target[c.ID] = b
				} else {
					target[c.ID] = lca.find(target[c.ID], b)
				}
			}
		}

		// If the target location is inside a loop,
		// move the target location up to just before the loop head.
		for _, b := range f.Blocks {
			origloop := loops.b2l[b.ID]
			for _, v := range b.Values {
				t := target[v.ID]
				if t == nil {
					continue
				}
				targetloop := loops.b2l[t.ID]
				// Climb out of loops strictly deeper than v's current loop:
				// hop to the immediate dominator of each offending loop header.
				for targetloop != nil && (origloop == nil || targetloop.depth > origloop.depth) {
					t = idom[targetloop.header.ID]
					target[v.ID] = t
					targetloop = loops.b2l[t.ID]
				}
			}
		}

		// Move values to target locations.
		for _, b := range f.Blocks {
			for i := 0; i < len(b.Values); i++ {
				v := b.Values[i]
				t := target[v.ID]
				if t == nil || t == b {
					// v is not moveable, or is already in correct place.
					continue
				}
				if mem := v.MemoryArg(); mem != nil {
					if startMem[t.ID] != mem {
						// We can't move a value with a memory arg unless the target block
						// has that memory arg as its starting memory.
						continue
					}
				}
				if f.pass.debug > 0 {
					b.Func.Warnl(v.Pos, "%v is moved", v.Op)
				}
				// Move v to the block which dominates its uses.
				t.Values = append(t.Values, v)
				v.Block = t
				// Swap-remove v from b.Values; nil the vacated tail slot so
				// the backing array doesn't keep the value alive, and revisit
				// index i (it now holds the swapped-in value).
				last := len(b.Values) - 1
				b.Values[i] = b.Values[last]
				b.Values[last] = nil
				b.Values = b.Values[:last]
				changed = true
				i--
			}
		}
	}
}
   174  
   175  // phiTighten moves constants closer to phi users.
   176  // This pass avoids having lots of constants live for lots of the program.
   177  // See issue 16407.
   178  func phiTighten(f *Func) {
   179  	for _, b := range f.Blocks {
   180  		for _, v := range b.Values {
   181  			if v.Op != OpPhi {
   182  				continue
   183  			}
   184  			for i, a := range v.Args {
   185  				if !a.rematerializeable() {
   186  					continue // not a constant we can move around
   187  				}
   188  				if a.Block == b.Preds[i].b {
   189  					continue // already in the right place
   190  				}
   191  				// Make a copy of a, put in predecessor block.
   192  				v.SetArg(i, a.copyInto(b.Preds[i].b))
   193  			}
   194  		}
   195  	}
   196  }
   197  
   198  // memState computes the memory state at the beginning and end of each block of
   199  // the function. The memory state is represented by a value of mem type.
   200  // The returned result is stored in startMem and endMem, and endMem is nil for
   201  // blocks with no successors (Exit,Ret,RetJmp blocks). This algorithm is not
   202  // suitable for infinite loop blocks that do not contain any mem operations.
   203  // For example:
   204  // b1:
   205  //
   206  //	(some values)
   207  //
   208  // plain -> b2
   209  // b2: <- b1 b2
   210  // Plain -> b2
   211  //
   212  // Algorithm introduction:
   213  //  1. The start memory state of a block is InitMem, a Phi node of type mem or
   214  //     an incoming memory value.
   215  //  2. The start memory state of a block is consistent with the end memory state
   216  //     of its parent nodes. If the start memory state of a block is a Phi value,
   217  //     then the end memory state of its parent nodes is consistent with the
   218  //     corresponding argument value of the Phi node.
   219  //  3. The algorithm first obtains the memory state of some blocks in the tree
   220  //     in the first step. Then floods the known memory state to other nodes in
   221  //     the second step.
   222  func memState(f *Func, startMem, endMem []*Value) {
   223  	// This slice contains the set of blocks that have had their startMem set but this
   224  	// startMem value has not yet been propagated to the endMem of its predecessors
   225  	changed := make([]*Block, 0)
   226  	// First step, init the memory state of some blocks.
   227  	for _, b := range f.Blocks {
   228  		for _, v := range b.Values {
   229  			var mem *Value
   230  			if v.Op == OpPhi {
   231  				if v.Type.IsMemory() {
   232  					mem = v
   233  				}
   234  			} else if v.Op == OpInitMem {
   235  				mem = v // This is actually not needed.
   236  			} else if a := v.MemoryArg(); a != nil && a.Block != b {
   237  				// The only incoming memory value doesn't belong to this block.
   238  				mem = a
   239  			}
   240  			if mem != nil {
   241  				if old := startMem[b.ID]; old != nil {
   242  					if old == mem {
   243  						continue
   244  					}
   245  					f.Fatalf("func %s, startMem[%v] has different values, old %v, new %v", f.Name, b, old, mem)
   246  				}
   247  				startMem[b.ID] = mem
   248  				changed = append(changed, b)
   249  			}
   250  		}
   251  	}
   252  
   253  	// Second step, floods the known memory state of some blocks to others.
   254  	for len(changed) != 0 {
   255  		top := changed[0]
   256  		changed = changed[1:]
   257  		mem := startMem[top.ID]
   258  		for i, p := range top.Preds {
   259  			pb := p.b
   260  			if endMem[pb.ID] != nil {
   261  				continue
   262  			}
   263  			if mem.Op == OpPhi && mem.Block == top {
   264  				endMem[pb.ID] = mem.Args[i]
   265  			} else {
   266  				endMem[pb.ID] = mem
   267  			}
   268  			if startMem[pb.ID] == nil {
   269  				startMem[pb.ID] = endMem[pb.ID]
   270  				changed = append(changed, pb)
   271  			}
   272  		}
   273  	}
   274  }
   275  

View as plain text