This source file includes the following definitions:
- GetHeaderDBKey
- IsChildEntryKey
- GetCacheEntryKey
- IsCacheEntryKey
- GetIdFromCacheEntryKey
- GetIdEntryKey
- IsIdEntryKey
- LevelDBStatusToDBInitStatus
- GetDefaultHeaderEntry
- MoveIfPossible
- IsAtEnd
- GetID
- GetValue
- GetCacheEntry
- Advance
- HasError
- IsAtEnd
- GetID
- GetValue
- Advance
- HasError
- AdvanceInternal
- UpgradeOldDB
- ResourceMetadataStorage (constructor)
- Destroy
- Initialize
- RecoverCacheInfoFromTrashedResourceMap
- SetLargestChangestamp
- GetLargestChangestamp
- PutEntry
- GetEntry
- RemoveEntry
- GetIterator
- GetChild
- GetChildren
- PutCacheEntry
- GetCacheEntry
- RemoveCacheEntry
- GetCacheEntryIterator
- GetIdByResourceId
- DestroyOnBlockingPool
- GetChildEntryKey
- PutHeader
- GetHeader
- CheckValidity
#include "chrome/browser/chromeos/drive/resource_metadata_storage.h"
#include "base/bind.h"
#include "base/file_util.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/metrics/histogram.h"
#include "base/metrics/sparse_histogram.h"
#include "base/sequenced_task_runner.h"
#include "base/threading/thread_restrictions.h"
#include "chrome/browser/chromeos/drive/drive.pb.h"
#include "third_party/leveldatabase/src/include/leveldb/db.h"
#include "third_party/leveldatabase/src/include/leveldb/write_batch.h"
namespace drive {
namespace internal {
namespace {
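// Result of a database initialization attempt. The values are recorded in UMA
// histograms, so existing entries must not be reordered or removed; new
// entries go just before DB_INIT_MAX_VALUE.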
enum DBInitStatus {
DB_INIT_SUCCESS,
DB_INIT_NOT_FOUND,
DB_INIT_CORRUPTION,
DB_INIT_IO_ERROR,
DB_INIT_FAILED,
DB_INIT_INCOMPATIBLE,
DB_INIT_BROKEN,
DB_INIT_OPENED_EXISTING_DB,
DB_INIT_CREATED_NEW_DB,
DB_INIT_REPLACED_EXISTING_DB_WITH_NEW_DB,
DB_INIT_MAX_VALUE,
};
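// Database file names. The "preserved" map temporarily keeps the previous DB
// while a new one is being created; the "trashed" map holds a discarded DB so
// that cache information can still be recovered from it.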
const base::FilePath::CharType kResourceMapDBName[] =
FILE_PATH_LITERAL("resource_metadata_resource_map.db");
const base::FilePath::CharType kPreservedResourceMapDBName[] =
FILE_PATH_LITERAL("resource_metadata_preserved_resource_map.db");
const base::FilePath::CharType kTrashedResourceMapDBName[] =
FILE_PATH_LITERAL("resource_metadata_trashed_resource_map.db");
const char kDBKeyDelimeter = '\0';
const char kCacheEntryKeySuffix[] = "CACHE";
const char kIdEntryKeyPrefix[] = "ID";
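// Returns the database key of the header entry.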
std::string GetHeaderDBKey() {
std::string key;
key.push_back(kDBKeyDelimeter);
key.append("HEADER");
return key;
}
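// Returns true if |key| is a key of a child entry (it ends with the
// delimiter).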
bool IsChildEntryKey(const leveldb::Slice& key) {
return !key.empty() && key[key.size() - 1] == kDBKeyDelimeter;
}
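// Returns the key of the cache entry that belongs to the entry with |id|.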
std::string GetCacheEntryKey(const std::string& id) {
std::string key(id);
key.push_back(kDBKeyDelimeter);
key.append(kCacheEntryKeySuffix);
return key;
}
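// Returns true if |key| is a key of a cache entry.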
bool IsCacheEntryKey(const leveldb::Slice& key) {
const leveldb::Slice expected_suffix(kCacheEntryKeySuffix,
arraysize(kCacheEntryKeySuffix) - 1);
if (key.size() < 1 + expected_suffix.size() ||
key[key.size() - expected_suffix.size() - 1] != kDBKeyDelimeter)
return false;
const leveldb::Slice key_substring(
key.data() + key.size() - expected_suffix.size(), expected_suffix.size());
return key_substring.compare(expected_suffix) == 0;
}
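// Extracts the entry ID from a cache entry key.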
std::string GetIdFromCacheEntryKey(const leveldb::Slice& key) {
DCHECK(IsCacheEntryKey(key));
const size_t kSuffixLength = arraysize(kCacheEntryKeySuffix) - 1;
const int id_length = key.size() - 1 - kSuffixLength;
return std::string(key.data(), id_length);
}
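// Returns the key of the resource-ID-to-local-ID mapping entry.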
std::string GetIdEntryKey(const std::string& resource_id) {
std::string key;
key.push_back(kDBKeyDelimeter);
key.append(kIdEntryKeyPrefix);
key.push_back(kDBKeyDelimeter);
key.append(resource_id);
return key;
}
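// Returns true if |key| is a key of an ID mapping entry.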
bool IsIdEntryKey(const leveldb::Slice& key) {
const leveldb::Slice expected_prefix(kIdEntryKeyPrefix,
arraysize(kIdEntryKeyPrefix) - 1);
if (key.size() < 2 + expected_prefix.size())
return false;
const leveldb::Slice key_substring(key.data() + 1, expected_prefix.size());
return key[0] == kDBKeyDelimeter &&
key_substring.compare(expected_prefix) == 0 &&
key[expected_prefix.size() + 1] == kDBKeyDelimeter;
}
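// Converts a leveldb::Status to the DBInitStatus value reported to UMA.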
DBInitStatus LevelDBStatusToDBInitStatus(const leveldb::Status& status) {
if (status.ok())
return DB_INIT_SUCCESS;
if (status.IsNotFound())
return DB_INIT_NOT_FOUND;
if (status.IsCorruption())
return DB_INIT_CORRUPTION;
if (status.IsIOError())
return DB_INIT_IO_ERROR;
return DB_INIT_FAILED;
}
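// Returns a header protobuf initialized with the current DB version.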
ResourceMetadataHeader GetDefaultHeaderEntry() {
ResourceMetadataHeader header;
header.set_version(ResourceMetadataStorage::kDBVersion);
return header;
}
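// Moves |from| to |to|. Succeeds trivially when |from| does not exist.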
bool MoveIfPossible(const base::FilePath& from, const base::FilePath& to) {
return !base::PathExists(from) || base::Move(from, to);
}
}  // namespace
ResourceMetadataStorage::Iterator::Iterator(scoped_ptr<leveldb::Iterator> it)
: it_(it.Pass()) {
base::ThreadRestrictions::AssertIOAllowed();
DCHECK(it_);
it_->Seek(leveldb::Slice(GetHeaderDBKey()));
Advance();
}
ResourceMetadataStorage::Iterator::~Iterator() {
base::ThreadRestrictions::AssertIOAllowed();
}
bool ResourceMetadataStorage::Iterator::IsAtEnd() const {
base::ThreadRestrictions::AssertIOAllowed();
return !it_->Valid();
}
std::string ResourceMetadataStorage::Iterator::GetID() const {
return it_->key().ToString();
}
const ResourceEntry& ResourceMetadataStorage::Iterator::GetValue() const {
base::ThreadRestrictions::AssertIOAllowed();
DCHECK(!IsAtEnd());
return entry_;
}
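// Fetches the cache entry associated with the current entry. |it_| is
// temporarily repositioned to the cache entry key and restored afterwards.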
bool ResourceMetadataStorage::Iterator::GetCacheEntry(
FileCacheEntry* cache_entry) {
base::ThreadRestrictions::AssertIOAllowed();
DCHECK(!IsAtEnd());
std::string current_key = it_->key().ToString();
std::string cache_entry_key = GetCacheEntryKey(current_key);
it_->Seek(leveldb::Slice(cache_entry_key));
bool success = it_->Valid() &&
it_->key().compare(cache_entry_key) == 0 &&
cache_entry->ParseFromArray(it_->value().data(), it_->value().size());
it_->Seek(leveldb::Slice(current_key));
DCHECK(!IsAtEnd());
DCHECK_EQ(current_key, it_->key().ToString());
return success;
}
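// Moves to the next resource entry, skipping child, cache, and ID mapping
// entries as well as values that fail to parse as ResourceEntry.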
void ResourceMetadataStorage::Iterator::Advance() {
base::ThreadRestrictions::AssertIOAllowed();
DCHECK(!IsAtEnd());
for (it_->Next(); it_->Valid(); it_->Next()) {
if (!IsChildEntryKey(it_->key()) &&
!IsCacheEntryKey(it_->key()) &&
!IsIdEntryKey(it_->key()) &&
entry_.ParseFromArray(it_->value().data(), it_->value().size()))
break;
}
}
bool ResourceMetadataStorage::Iterator::HasError() const {
base::ThreadRestrictions::AssertIOAllowed();
return !it_->status().ok();
}
ResourceMetadataStorage::CacheEntryIterator::CacheEntryIterator(
scoped_ptr<leveldb::Iterator> it) : it_(it.Pass()) {
base::ThreadRestrictions::AssertIOAllowed();
DCHECK(it_);
it_->SeekToFirst();
AdvanceInternal();
}
ResourceMetadataStorage::CacheEntryIterator::~CacheEntryIterator() {
base::ThreadRestrictions::AssertIOAllowed();
}
bool ResourceMetadataStorage::CacheEntryIterator::IsAtEnd() const {
base::ThreadRestrictions::AssertIOAllowed();
return !it_->Valid();
}
const std::string& ResourceMetadataStorage::CacheEntryIterator::GetID() const {
base::ThreadRestrictions::AssertIOAllowed();
DCHECK(!IsAtEnd());
return id_;
}
const FileCacheEntry&
ResourceMetadataStorage::CacheEntryIterator::GetValue() const {
base::ThreadRestrictions::AssertIOAllowed();
DCHECK(!IsAtEnd());
return entry_;
}
void ResourceMetadataStorage::CacheEntryIterator::Advance() {
base::ThreadRestrictions::AssertIOAllowed();
DCHECK(!IsAtEnd());
it_->Next();
AdvanceInternal();
}
bool ResourceMetadataStorage::CacheEntryIterator::HasError() const {
base::ThreadRestrictions::AssertIOAllowed();
return !it_->status().ok();
}
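// Advances |it_| until it points at a parsable cache entry, or the end of the
// DB is reached.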
void ResourceMetadataStorage::CacheEntryIterator::AdvanceInternal() {
for (; it_->Valid(); it_->Next()) {
if (IsCacheEntryKey(it_->key()) &&
entry_.ParseFromArray(it_->value().data(), it_->value().size())) {
id_ = GetIdFromCacheEntryKey(it_->key());
break;
}
}
}
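// Upgrades the DB at |directory_path| to kDBVersion. Versions older than 6
// are rejected. For versions 6 to 10, only cache entries are kept: their IDs
// are canonicalized and corresponding ID mapping entries are added. For
// version 11, cache and ID mapping entries are kept. In both cases all other
// keys are deleted and the header is rewritten with the current version.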
bool ResourceMetadataStorage::UpgradeOldDB(
const base::FilePath& directory_path,
const ResourceIdCanonicalizer& id_canonicalizer) {
base::ThreadRestrictions::AssertIOAllowed();
COMPILE_ASSERT(
kDBVersion == 12,
db_version_and_this_function_should_be_updated_at_the_same_time);
const base::FilePath resource_map_path =
directory_path.Append(kResourceMapDBName);
const base::FilePath preserved_resource_map_path =
directory_path.Append(kPreservedResourceMapDBName);
if (base::PathExists(preserved_resource_map_path)) {
if (!base::DeleteFile(resource_map_path, false /* recursive */) ||
!base::Move(preserved_resource_map_path, resource_map_path))
return false;
}
if (!base::PathExists(resource_map_path))
return false;
leveldb::DB* db = NULL;
leveldb::Options options;
options.max_open_files = 0;
options.create_if_missing = false;
if (!leveldb::DB::Open(options, resource_map_path.AsUTF8Unsafe(), &db).ok())
return false;
scoped_ptr<leveldb::DB> resource_map(db);
std::string serialized_header;
ResourceMetadataHeader header;
if (!resource_map->Get(leveldb::ReadOptions(),
leveldb::Slice(GetHeaderDBKey()),
&serialized_header).ok() ||
!header.ParseFromString(serialized_header))
return false;
UMA_HISTOGRAM_SPARSE_SLOWLY("Drive.MetadataDBVersionBeforeUpgradeCheck",
header.version());
if (header.version() == kDBVersion) {
return true;
} else if (header.version() < 6) {
return false;
} else if (header.version() < 11) {
leveldb::ReadOptions options;
options.verify_checksums = true;
scoped_ptr<leveldb::Iterator> it(resource_map->NewIterator(options));
leveldb::WriteBatch batch;
for (it->SeekToFirst(); it->Valid(); it->Next()) {
if (IsCacheEntryKey(it->key())) {
const std::string& id = GetIdFromCacheEntryKey(it->key());
const std::string& id_new = id_canonicalizer.Run(id);
if (id != id_new) {
batch.Delete(it->key());
batch.Put(GetCacheEntryKey(id_new), it->value());
}
batch.Put(GetIdEntryKey(id_new), id_new);
} else {
batch.Delete(it->key());
}
}
if (!it->status().ok())
return false;
std::string serialized_header;
if (!GetDefaultHeaderEntry().SerializeToString(&serialized_header))
return false;
batch.Put(GetHeaderDBKey(), serialized_header);
return resource_map->Write(leveldb::WriteOptions(), &batch).ok();
} else if (header.version() < 12) {
leveldb::ReadOptions options;
options.verify_checksums = true;
scoped_ptr<leveldb::Iterator> it(resource_map->NewIterator(options));
leveldb::WriteBatch batch;
for (it->SeekToFirst(); it->Valid(); it->Next()) {
if (!IsCacheEntryKey(it->key()) && !IsIdEntryKey(it->key()))
batch.Delete(it->key());
}
if (!it->status().ok())
return false;
std::string serialized_header;
if (!GetDefaultHeaderEntry().SerializeToString(&serialized_header))
return false;
batch.Put(GetHeaderDBKey(), serialized_header);
return resource_map->Write(leveldb::WriteOptions(), &batch).ok();
}
LOG(WARNING) << "Unexpected DB version: " << header.version();
return false;
}
ResourceMetadataStorage::ResourceMetadataStorage(
const base::FilePath& directory_path,
base::SequencedTaskRunner* blocking_task_runner)
: directory_path_(directory_path),
cache_file_scan_is_needed_(true),
blocking_task_runner_(blocking_task_runner) {
}
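// Posts destruction of this object to |blocking_task_runner_|; the object is
// actually deleted in DestroyOnBlockingPool().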
void ResourceMetadataStorage::Destroy() {
blocking_task_runner_->PostTask(
FROM_HERE,
base::Bind(&ResourceMetadataStorage::DestroyOnBlockingPool,
base::Unretained(this)));
}
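// Opens the existing resource map if it has the current version and passes
// CheckValidity(). Otherwise the old DB is moved aside to the preserved path,
// a fresh DB with a default header is created, and the old DB is finally
// moved to the trashed path so that cache info can be recovered from it
// later. Leftover preserved/trashed DBs are deleted first.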
bool ResourceMetadataStorage::Initialize() {
base::ThreadRestrictions::AssertIOAllowed();
resource_map_.reset();
const base::FilePath resource_map_path =
directory_path_.Append(kResourceMapDBName);
const base::FilePath preserved_resource_map_path =
directory_path_.Append(kPreservedResourceMapDBName);
const base::FilePath trashed_resource_map_path =
directory_path_.Append(kTrashedResourceMapDBName);
if (!base::DeleteFile(preserved_resource_map_path, true /* recursive */) ||
!base::DeleteFile(trashed_resource_map_path, true /* recursive */)) {
LOG(ERROR) << "Failed to remove unneeded DBs.";
return false;
}
leveldb::DB* db = NULL;
leveldb::Options options;
options.max_open_files = 0;
options.create_if_missing = false;
DBInitStatus open_existing_result = DB_INIT_NOT_FOUND;
leveldb::Status status;
if (base::PathExists(resource_map_path)) {
status = leveldb::DB::Open(options, resource_map_path.AsUTF8Unsafe(), &db);
open_existing_result = LevelDBStatusToDBInitStatus(status);
}
if (open_existing_result == DB_INIT_SUCCESS) {
resource_map_.reset(db);
int db_version = -1;
ResourceMetadataHeader header;
if (GetHeader(&header))
db_version = header.version();
bool should_discard_db = true;
if (db_version != kDBVersion) {
open_existing_result = DB_INIT_INCOMPATIBLE;
DVLOG(1) << "Reject incompatible DB.";
} else if (!CheckValidity()) {
open_existing_result = DB_INIT_BROKEN;
LOG(ERROR) << "Reject invalid DB.";
} else {
should_discard_db = false;
}
if (should_discard_db)
resource_map_.reset();
else
cache_file_scan_is_needed_ = false;
}
UMA_HISTOGRAM_ENUMERATION("Drive.MetadataDBOpenExistingResult",
open_existing_result,
DB_INIT_MAX_VALUE);
DBInitStatus init_result = DB_INIT_OPENED_EXISTING_DB;
if (!resource_map_) {
MoveIfPossible(resource_map_path, preserved_resource_map_path);
options.max_open_files = 0;
options.create_if_missing = true;
options.error_if_exists = true;
status = leveldb::DB::Open(options, resource_map_path.AsUTF8Unsafe(), &db);
if (status.ok()) {
resource_map_.reset(db);
if (PutHeader(GetDefaultHeaderEntry()) &&
MoveIfPossible(preserved_resource_map_path,
trashed_resource_map_path)) {
init_result = open_existing_result == DB_INIT_NOT_FOUND ?
DB_INIT_CREATED_NEW_DB : DB_INIT_REPLACED_EXISTING_DB_WITH_NEW_DB;
} else {
init_result = DB_INIT_FAILED;
resource_map_.reset();
}
} else {
LOG(ERROR) << "Failed to create resource map DB: " << status.ToString();
init_result = LevelDBStatusToDBInitStatus(status);
}
}
UMA_HISTOGRAM_ENUMERATION("Drive.MetadataDBInitResult",
init_result,
DB_INIT_MAX_VALUE);
return resource_map_;
}
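// Repairs and opens the trashed resource map (which must have the current DB
// version), then, for every cache entry in it, records the dirty bit, the MD5
// value, and, when the corresponding resource entry still exists, the entry
// title into |out_info|.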
void ResourceMetadataStorage::RecoverCacheInfoFromTrashedResourceMap(
RecoveredCacheInfoMap* out_info) {
const base::FilePath trashed_resource_map_path =
directory_path_.Append(kTrashedResourceMapDBName);
if (!base::PathExists(trashed_resource_map_path))
return;
leveldb::Options options;
options.max_open_files = 0;
options.create_if_missing = false;
leveldb::Status status;
status = leveldb::RepairDB(trashed_resource_map_path.AsUTF8Unsafe(), options);
if (!status.ok()) {
LOG(ERROR) << "Failed to repair trashed DB: " << status.ToString();
return;
}
leveldb::DB* db = NULL;
status = leveldb::DB::Open(options, trashed_resource_map_path.AsUTF8Unsafe(),
&db);
if (!status.ok()) {
LOG(ERROR) << "Failed to open trashed DB: " << status.ToString();
return;
}
scoped_ptr<leveldb::DB> resource_map(db);
std::string serialized_header;
ResourceMetadataHeader header;
if (!resource_map->Get(leveldb::ReadOptions(),
leveldb::Slice(GetHeaderDBKey()),
&serialized_header).ok() ||
!header.ParseFromString(serialized_header) ||
header.version() != kDBVersion) {
LOG(ERROR) << "Incompatible DB version: " << header.version();
return;
}
scoped_ptr<leveldb::Iterator> it(
resource_map->NewIterator(leveldb::ReadOptions()));
for (it->SeekToFirst(); it->Valid(); it->Next()) {
if (IsCacheEntryKey(it->key())) {
const std::string& id = GetIdFromCacheEntryKey(it->key());
FileCacheEntry cache_entry;
if (cache_entry.ParseFromArray(it->value().data(), it->value().size())) {
RecoveredCacheInfo* info = &(*out_info)[id];
info->is_dirty = cache_entry.is_dirty();
info->md5 = cache_entry.md5();
std::string serialized_entry;
ResourceEntry entry;
if (resource_map->Get(leveldb::ReadOptions(),
leveldb::Slice(id),
&serialized_entry).ok() &&
entry.ParseFromString(serialized_entry))
info->title = entry.title();
}
}
}
}
bool ResourceMetadataStorage::SetLargestChangestamp(
int64 largest_changestamp) {
base::ThreadRestrictions::AssertIOAllowed();
ResourceMetadataHeader header;
if (!GetHeader(&header)) {
DLOG(ERROR) << "Failed to get the header.";
return false;
}
header.set_largest_changestamp(largest_changestamp);
return PutHeader(header);
}
int64 ResourceMetadataStorage::GetLargestChangestamp() {
base::ThreadRestrictions::AssertIOAllowed();
ResourceMetadataHeader header;
if (!GetHeader(&header)) {
DLOG(ERROR) << "Failed to get the header.";
return 0;
}
return header.largest_changestamp();
}
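// Stores |entry| keyed by its local ID. The child key under its parent and
// the resource-ID-to-local-ID mapping are updated in the same write batch
// when they have changed.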
bool ResourceMetadataStorage::PutEntry(const ResourceEntry& entry) {
base::ThreadRestrictions::AssertIOAllowed();
const std::string& id = entry.local_id();
DCHECK(!id.empty());
std::string serialized_entry;
leveldb::Status status = resource_map_->Get(leveldb::ReadOptions(),
leveldb::Slice(id),
&serialized_entry);
if (!status.ok() && !status.IsNotFound())
return false;
ResourceEntry old_entry;
if (status.ok() && !old_entry.ParseFromString(serialized_entry))
return false;
leveldb::WriteBatch batch;
if (!old_entry.parent_local_id().empty()) {
batch.Delete(GetChildEntryKey(old_entry.parent_local_id(),
old_entry.base_name()));
}
if (!entry.parent_local_id().empty())
batch.Put(GetChildEntryKey(entry.parent_local_id(), entry.base_name()), id);
if (old_entry.resource_id() != entry.resource_id()) {
DCHECK(old_entry.resource_id().empty() || entry.resource_id().empty());
if (!old_entry.resource_id().empty())
batch.Delete(GetIdEntryKey(old_entry.resource_id()));
if (!entry.resource_id().empty())
batch.Put(GetIdEntryKey(entry.resource_id()), id);
}
if (!entry.SerializeToString(&serialized_entry)) {
DLOG(ERROR) << "Failed to serialize the entry: " << id;
return false;
}
batch.Put(id, serialized_entry);
status = resource_map_->Write(leveldb::WriteOptions(), &batch);
return status.ok();
}
bool ResourceMetadataStorage::GetEntry(const std::string& id,
ResourceEntry* out_entry) {
base::ThreadRestrictions::AssertIOAllowed();
DCHECK(!id.empty());
std::string serialized_entry;
const leveldb::Status status = resource_map_->Get(leveldb::ReadOptions(),
leveldb::Slice(id),
&serialized_entry);
return status.ok() && out_entry->ParseFromString(serialized_entry);
}
bool ResourceMetadataStorage::RemoveEntry(const std::string& id) {
base::ThreadRestrictions::AssertIOAllowed();
DCHECK(!id.empty());
ResourceEntry entry;
if (!GetEntry(id, &entry))
return false;
leveldb::WriteBatch batch;
if (!entry.parent_local_id().empty())
batch.Delete(GetChildEntryKey(entry.parent_local_id(), entry.base_name()));
if (!entry.resource_id().empty())
batch.Delete(GetIdEntryKey(entry.resource_id()));
batch.Delete(id);
const leveldb::Status status = resource_map_->Write(leveldb::WriteOptions(),
&batch);
return status.ok();
}
scoped_ptr<ResourceMetadataStorage::Iterator>
ResourceMetadataStorage::GetIterator() {
base::ThreadRestrictions::AssertIOAllowed();
scoped_ptr<leveldb::Iterator> it(
resource_map_->NewIterator(leveldb::ReadOptions()));
return make_scoped_ptr(new Iterator(it.Pass()));
}
std::string ResourceMetadataStorage::GetChild(const std::string& parent_id,
const std::string& child_name) {
base::ThreadRestrictions::AssertIOAllowed();
DCHECK(!parent_id.empty());
DCHECK(!child_name.empty());
std::string child_id;
resource_map_->Get(leveldb::ReadOptions(),
leveldb::Slice(GetChildEntryKey(parent_id, child_name)),
&child_id);
return child_id;
}
void ResourceMetadataStorage::GetChildren(const std::string& parent_id,
std::vector<std::string>* children) {
base::ThreadRestrictions::AssertIOAllowed();
DCHECK(!parent_id.empty());
scoped_ptr<leveldb::Iterator> it(
resource_map_->NewIterator(leveldb::ReadOptions()));
for (it->Seek(parent_id);
it->Valid() && it->key().starts_with(leveldb::Slice(parent_id));
it->Next()) {
if (IsChildEntryKey(it->key()))
children->push_back(it->value().ToString());
}
DCHECK(it->status().ok());
}
bool ResourceMetadataStorage::PutCacheEntry(const std::string& id,
const FileCacheEntry& entry) {
base::ThreadRestrictions::AssertIOAllowed();
DCHECK(!id.empty());
std::string serialized_entry;
if (!entry.SerializeToString(&serialized_entry)) {
DLOG(ERROR) << "Failed to serialize the entry.";
return false;
}
const leveldb::Status status = resource_map_->Put(
leveldb::WriteOptions(),
leveldb::Slice(GetCacheEntryKey(id)),
leveldb::Slice(serialized_entry));
return status.ok();
}
bool ResourceMetadataStorage::GetCacheEntry(const std::string& id,
FileCacheEntry* out_entry) {
base::ThreadRestrictions::AssertIOAllowed();
DCHECK(!id.empty());
std::string serialized_entry;
const leveldb::Status status = resource_map_->Get(
leveldb::ReadOptions(),
leveldb::Slice(GetCacheEntryKey(id)),
&serialized_entry);
return status.ok() && out_entry->ParseFromString(serialized_entry);
}
bool ResourceMetadataStorage::RemoveCacheEntry(const std::string& id) {
base::ThreadRestrictions::AssertIOAllowed();
DCHECK(!id.empty());
const leveldb::Status status = resource_map_->Delete(
leveldb::WriteOptions(),
leveldb::Slice(GetCacheEntryKey(id)));
return status.ok();
}
scoped_ptr<ResourceMetadataStorage::CacheEntryIterator>
ResourceMetadataStorage::GetCacheEntryIterator() {
base::ThreadRestrictions::AssertIOAllowed();
scoped_ptr<leveldb::Iterator> it(
resource_map_->NewIterator(leveldb::ReadOptions()));
return make_scoped_ptr(new CacheEntryIterator(it.Pass()));
}
ResourceMetadataStorage::RecoveredCacheInfo::RecoveredCacheInfo()
: is_dirty(false) {}
ResourceMetadataStorage::RecoveredCacheInfo::~RecoveredCacheInfo() {}
bool ResourceMetadataStorage::GetIdByResourceId(
const std::string& resource_id,
std::string* out_id) {
base::ThreadRestrictions::AssertIOAllowed();
DCHECK(!resource_id.empty());
const leveldb::Status status = resource_map_->Get(
leveldb::ReadOptions(),
leveldb::Slice(GetIdEntryKey(resource_id)),
out_id);
return status.ok();
}
ResourceMetadataStorage::~ResourceMetadataStorage() {
base::ThreadRestrictions::AssertIOAllowed();
}
void ResourceMetadataStorage::DestroyOnBlockingPool() {
delete this;
}
std::string ResourceMetadataStorage::GetChildEntryKey(
const std::string& parent_id,
const std::string& child_name) {
DCHECK(!parent_id.empty());
DCHECK(!child_name.empty());
std::string key = parent_id;
key.push_back(kDBKeyDelimeter);
key.append(child_name);
key.push_back(kDBKeyDelimeter);
return key;
}
bool ResourceMetadataStorage::PutHeader(
const ResourceMetadataHeader& header) {
base::ThreadRestrictions::AssertIOAllowed();
std::string serialized_header;
if (!header.SerializeToString(&serialized_header)) {
DLOG(ERROR) << "Failed to serialize the header";
return false;
}
const leveldb::Status status = resource_map_->Put(
leveldb::WriteOptions(),
leveldb::Slice(GetHeaderDBKey()),
leveldb::Slice(serialized_header));
return status.ok();
}
bool ResourceMetadataStorage::GetHeader(ResourceMetadataHeader* header) {
base::ThreadRestrictions::AssertIOAllowed();
std::string serialized_header;
const leveldb::Status status = resource_map_->Get(
leveldb::ReadOptions(),
leveldb::Slice(GetHeaderDBKey()),
&serialized_header);
return status.ok() && header->ParseFromString(serialized_header);
}
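// Scans the whole DB with checksum verification. The first key must be a
// header with the current version, every ID mapping entry must point to an
// entry carrying that resource ID (missing targets are tolerated), every
// entry with a parent must have an existing parent entry and a matching child
// key, and the number of child keys must equal the number of entries that
// have parents.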
bool ResourceMetadataStorage::CheckValidity() {
base::ThreadRestrictions::AssertIOAllowed();
leveldb::ReadOptions options;
options.verify_checksums = true;
scoped_ptr<leveldb::Iterator> it(resource_map_->NewIterator(options));
it->SeekToFirst();
ResourceMetadataHeader header;
if (!it->Valid() ||
it->key() != GetHeaderDBKey() ||
!header.ParseFromArray(it->value().data(), it->value().size()) ||
header.version() != kDBVersion) {
DLOG(ERROR) << "Invalid header detected. version = " << header.version();
return false;
}
size_t num_entries_with_parent = 0;
size_t num_child_entries = 0;
ResourceEntry entry;
std::string serialized_entry;
std::string child_id;
for (it->Next(); it->Valid(); it->Next()) {
if (IsChildEntryKey(it->key())) {
++num_child_entries;
continue;
}
if (IsCacheEntryKey(it->key()))
continue;
if (IsIdEntryKey(it->key())) {
leveldb::Status status = resource_map_->Get(
options, it->value(), &serialized_entry);
if (status.IsNotFound())
continue;
const bool ok = status.ok() &&
entry.ParseFromString(serialized_entry) &&
!entry.resource_id().empty() &&
leveldb::Slice(GetIdEntryKey(entry.resource_id())) == it->key();
if (!ok) {
DLOG(ERROR) << "Broken ID entry. status = " << status.ToString();
return false;
}
continue;
}
if (!entry.ParseFromArray(it->value().data(), it->value().size())) {
DLOG(ERROR) << "Broken entry detected";
return false;
}
if (!entry.parent_local_id().empty()) {
leveldb::Status status = resource_map_->Get(
options,
leveldb::Slice(entry.parent_local_id()),
&serialized_entry);
if (!status.ok()) {
DLOG(ERROR) << "Can't get parent entry. status = " << status.ToString();
return false;
}
status = resource_map_->Get(
options,
leveldb::Slice(GetChildEntryKey(entry.parent_local_id(),
entry.base_name())),
&child_id);
if (!status.ok() || leveldb::Slice(child_id) != it->key()) {
DLOG(ERROR) << "Child map is broken. status = " << status.ToString();
return false;
}
++num_entries_with_parent;
}
}
if (!it->status().ok() || num_child_entries != num_entries_with_parent) {
DLOG(ERROR) << "Error during checking resource map. status = "
<< it->status().ToString();
return false;
}
return true;
}
}  // namespace internal
}  // namespace drive