This source file includes the following definitions.
- RoundDown
- RoundUp
- bytes_in_use_
- Alloc
- Free
- FreePendingToken
- GetLargestFreeSize
- GetLargestFreeOrPendingSize
- CheckConsistency
- InUse
- CollapseFreeBlock
- WaitForTokenAndFreeBlock
- FreeUnused
- AllocInBlock
- GetBlockByOffset
 
#include "gpu/command_buffer/client/fenced_allocator.h"
#include <algorithm>
#include "gpu/command_buffer/client/cmd_buffer_helper.h"
namespace gpu {
namespace {
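// Allocation alignment, in bytes. Must be a power of two for the rounding
// masks below.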
const unsigned int kAllocAlignment = 16;
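// Rounds |size| down to a multiple of kAllocAlignment.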
unsigned int RoundDown(unsigned int size) {
  return size & ~(kAllocAlignment - 1);
}
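// Rounds |size| up to the next multiple of kAllocAlignment.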
unsigned int RoundUp(unsigned int size) {
  return (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1);
}
}  // namespace
#ifndef _MSC_VER
const FencedAllocator::Offset FencedAllocator::kInvalidOffset;
#endif
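// Creates the allocator. Initially the whole buffer is a single FREE block,
// with its size rounded down to the allocation alignment.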
FencedAllocator::FencedAllocator(unsigned int size,
                                 CommandBufferHelper* helper,
                                 const base::Closure& poll_callback)
    : helper_(helper),
      poll_callback_(poll_callback),
      bytes_in_use_(0) {
  Block block = { FREE, 0, RoundDown(size), kUnusedToken };
  blocks_.push_back(block);
}
FencedAllocator::~FencedAllocator() {
  // Free blocks pending tokens, waiting on each token as needed.
  for (unsigned int i = 0; i < blocks_.size(); ++i) {
    if (blocks_[i].state == FREE_PENDING_TOKEN) {
      i = WaitForTokenAndFreeBlock(i);
    }
  }
}
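// Allocates a block of memory. Tries the first FREE block that is large
// enough; if there is none, waits on FREE_PENDING_TOKEN blocks one by one,
// reclaiming each until one is large enough. Returns kInvalidOffset if the
// allocation cannot be satisfied.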
FencedAllocator::Offset FencedAllocator::Alloc(unsigned int size) {
  // An allocation of size 0 is not allowed.
  if (size == 0)  {
    return kInvalidOffset;
  }
  // Round up to the allocation alignment.
  size = RoundUp(size);
  // First-fit: try to allocate in an existing free block.
  for (unsigned int i = 0; i < blocks_.size(); ++i) {
    Block &block = blocks_[i];
    if (block.state == FREE && block.size >= size) {
      return AllocInBlock(i, size);
    }
  }
  // No free block is large enough: look for blocks pending tokens, waiting on
  // each token and reusing the freed block if it is large enough.
  for (unsigned int i = 0; i < blocks_.size(); ++i) {
    if (blocks_[i].state != FREE_PENDING_TOKEN)
      continue;
    i = WaitForTokenAndFreeBlock(i);
    if (blocks_[i].size >= size)
      return AllocInBlock(i, size);
  }
  return kInvalidOffset;
}
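// Frees the block at |offset| immediately and merges it with any neighboring
// free blocks.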
void FencedAllocator::Free(FencedAllocator::Offset offset) {
  BlockIndex index = GetBlockByOffset(offset);
  DCHECK_NE(blocks_[index].state, FREE);
  Block &block = blocks_[index];
  if (block.state == IN_USE)
    bytes_in_use_ -= block.size;
  block.state = FREE;
  CollapseFreeBlock(index);
}
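// Marks the block at |offset| as freed pending |token|. Its space is only
// reused once the token has passed.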
void FencedAllocator::FreePendingToken(
    FencedAllocator::Offset offset, int32 token) {
  BlockIndex index = GetBlockByOffset(offset);
  Block &block = blocks_[index];
  if (block.state == IN_USE)
    bytes_in_use_ -= block.size;
  block.state = FREE_PENDING_TOKEN;
  block.token = token;
}
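// Reclaims any blocks whose tokens have already passed, then returns the size
// of the largest FREE block.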
unsigned int FencedAllocator::GetLargestFreeSize() {
  FreeUnused();
  unsigned int max_size = 0;
  for (unsigned int i = 0; i < blocks_.size(); ++i) {
    Block &block = blocks_[i];
    if (block.state == FREE)
      max_size = std::max(max_size, block.size);
  }
  return max_size;
}
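// Returns the size of the largest contiguous run of blocks that are either
// FREE or FREE_PENDING_TOKEN.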
unsigned int FencedAllocator::GetLargestFreeOrPendingSize() {
  unsigned int max_size = 0;
  unsigned int current_size = 0;
  for (unsigned int i = 0; i < blocks_.size(); ++i) {
    Block &block = blocks_[i];
    if (block.state == IN_USE) {
      max_size = std::max(max_size, current_size);
      current_size = 0;
    } else {
      DCHECK(block.state == FREE || block.state == FREE_PENDING_TOKEN);
      current_size += block.size;
    }
  }
  return std::max(max_size, current_size);
}
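// Checks that the block list is consistent: blocks are sorted by offset,
// cover the buffer contiguously, and adjacent FREE blocks have been collapsed.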
bool FencedAllocator::CheckConsistency() {
  if (blocks_.size() < 1) return false;
  for (unsigned int i = 0; i < blocks_.size() - 1; ++i) {
    Block &current = blocks_[i];
    Block &next = blocks_[i + 1];
    // Blocks must be in strictly increasing offset order, contiguous, and
    // adjacent FREE blocks must have been collapsed.
    if (next.offset <= current.offset)
      return false;
    if (next.offset != current.offset + current.size)
      return false;
    if (current.state == FREE && next.state == FREE)
      return false;
  }
  return true;
}
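// The allocator is only idle when a single FREE block spans the whole buffer.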
bool FencedAllocator::InUse() {
  return blocks_.size() != 1 || blocks_[0].state != FREE;
}
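// Collapses the block at |index| with any FREE neighbors and returns the
// index of the resulting block.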
FencedAllocator::BlockIndex FencedAllocator::CollapseFreeBlock(
    BlockIndex index) {
  if (index + 1 < blocks_.size()) {
    Block &next = blocks_[index + 1];
    if (next.state == FREE) {
      blocks_[index].size += next.size;
      blocks_.erase(blocks_.begin() + index + 1);
    }
  }
  if (index > 0) {
    Block &prev = blocks_[index - 1];
    if (prev.state == FREE) {
      prev.size += blocks_[index].size;
      blocks_.erase(blocks_.begin() + index);
      --index;
    }
  }
  return index;
}
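// Waits for the block's token to pass, marks the block FREE and collapses it
// with its neighbors. Returns the index of the collapsed block.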
FencedAllocator::BlockIndex FencedAllocator::WaitForTokenAndFreeBlock(
    BlockIndex index) {
  Block &block = blocks_[index];
  DCHECK_EQ(block.state, FREE_PENDING_TOKEN);
  helper_->WaitForToken(block.token);
  block.state = FREE;
  return CollapseFreeBlock(index);
}
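// Frees every FREE_PENDING_TOKEN block whose token has already passed.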
void FencedAllocator::FreeUnused() {
  // Run the client-supplied poll callback before checking which tokens have
  // passed.
  poll_callback_.Run();
  for (unsigned int i = 0; i < blocks_.size();) {
    Block& block = blocks_[i];
    if (block.state == FREE_PENDING_TOKEN &&
        helper_->HasTokenPassed(block.token)) {
      block.state = FREE;
      i = CollapseFreeBlock(i);
    } else {
      ++i;
    }
  }
}
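// Allocates |size| bytes at the start of the FREE block at |index|. If the
// block is larger than |size|, the remainder is split off into a new FREE
// block.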
FencedAllocator::Offset FencedAllocator::AllocInBlock(BlockIndex index,
                                                      unsigned int size) {
  Block &block = blocks_[index];
  DCHECK_GE(block.size, size);
  DCHECK_EQ(block.state, FREE);
  Offset offset = block.offset;
  bytes_in_use_ += size;
  if (block.size == size) {
    block.state = IN_USE;
    return offset;
  }
  Block newblock = { FREE, offset + size, block.size - size, kUnusedToken};
  block.state = IN_USE;
  block.size = size;
  // Insert the remainder block last: the insertion may reallocate the vector
  // and invalidate the |block| reference above.
  blocks_.insert(blocks_.begin() + index + 1, newblock);
  return offset;
}
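// Finds the block starting exactly at |offset| with a binary search; blocks_
// is kept sorted by offset.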
FencedAllocator::BlockIndex FencedAllocator::GetBlockByOffset(Offset offset) {
  Block templ = { IN_USE, offset, 0, kUnusedToken };
  Container::iterator it = std::lower_bound(blocks_.begin(), blocks_.end(),
                                            templ, OffsetCmp());
  DCHECK(it != blocks_.end() && it->offset == offset);
  return it-blocks_.begin();
}
}  // namespace gpu
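
// A minimal usage sketch, for illustration only. It assumes |helper| is a
// live CommandBufferHelper, that CommandBufferHelper::InsertToken() is the
// token source, and that |buffer_size| and |poll_callback| are supplied by
// the client; none of these names are defined in this file.
//
//   gpu::FencedAllocator allocator(buffer_size, helper, poll_callback);
//   gpu::FencedAllocator::Offset offset = allocator.Alloc(256);
//   if (offset != gpu::FencedAllocator::kInvalidOffset) {
//     // ... issue commands that read from [offset, offset + 256) ...
//     allocator.FreePendingToken(offset, helper->InsertToken());
//   }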