diff --git a/src/cache_manager.cc b/src/cache_manager.cc
index 3b3d843a..2e6109c9 100644
--- a/src/cache_manager.cc
+++ b/src/cache_manager.cc
@@ -19,14 +19,7 @@ struct RealCacheManager : ICacheManager {
   void WriteToCache(IndexFile& file) override {
     std::string cache_path = GetCachePath(file.path);
-
-    if (!file.file_contents_.has_value()) {
-      LOG_S(ERROR) << "No cached file contents; performing potentially stale "
-                   << "file-copy for " << file.path;
-      CopyFileTo(cache_path, file.path);
-    } else {
-      WriteToFile(cache_path, *file.file_contents_);
-    }
+    WriteToFile(cache_path, file.file_contents);
 
     std::string indexed_content = Serialize(config_->cacheFormat, file);
     WriteToFile(AppendSerializationFormat(cache_path), indexed_content);
@@ -39,12 +32,13 @@ struct RealCacheManager : ICacheManager {
   std::unique_ptr<IndexFile> RawCacheLoad(const std::string& path) override {
     std::string cache_path = GetCachePath(path);
-    optional<std::string> file_content =
+    optional<std::string> file_content = ReadContent(cache_path);
+    optional<std::string> serialized_indexed_content =
         ReadContent(AppendSerializationFormat(cache_path));
-    if (!file_content)
+    if (!file_content || !serialized_indexed_content)
       return nullptr;
 
-    return Deserialize(config_->cacheFormat, path, *file_content,
+    return Deserialize(config_->cacheFormat, path, *serialized_indexed_content, *file_content,
                        IndexFile::kMajorVersion);
   }
 
@@ -96,7 +90,7 @@ struct FakeCacheManager : ICacheManager {
   std::unique_ptr<IndexFile> RawCacheLoad(const std::string& path) override {
     for (const FakeCacheEntry& entry : entries_) {
       if (entry.path == path) {
-        return Deserialize(SerializeFormat::Json, path, entry.json, nullopt);
+        return Deserialize(SerializeFormat::Json, path, entry.json, "", nullopt);
       }
     }
 
@@ -109,14 +103,14 @@ struct FakeCacheManager : ICacheManager {
 }  // namespace
 
 // static
-std::unique_ptr<ICacheManager> ICacheManager::Make(Config* config) {
-  return MakeUnique<RealCacheManager>(config);
+std::shared_ptr<ICacheManager> ICacheManager::Make(Config* config) {
+  return std::make_shared<RealCacheManager>(config);
 }
 
 // static
-std::unique_ptr<ICacheManager> ICacheManager::MakeFake(
+std::shared_ptr<ICacheManager> ICacheManager::MakeFake(
     const std::vector<FakeCacheEntry>& entries) {
-  return MakeUnique<FakeCacheManager>(entries);
+  return std::make_shared<FakeCacheManager>(entries);
 }
 
 ICacheManager::~ICacheManager() = default;
diff --git a/src/cache_manager.h b/src/cache_manager.h
index b0a39c18..e5fd9672 100644
--- a/src/cache_manager.h
+++ b/src/cache_manager.h
@@ -18,8 +18,8 @@ struct ICacheManager {
     std::string json;
   };
 
-  static std::unique_ptr<ICacheManager> Make(Config* config);
-  static std::unique_ptr<ICacheManager> MakeFake(
+  static std::shared_ptr<ICacheManager> Make(Config* config);
+  static std::shared_ptr<ICacheManager> MakeFake(
       const std::vector<FakeCacheEntry>& entries);
 
   virtual ~ICacheManager();
diff --git a/src/file_consumer.cc b/src/file_consumer.cc
index 20f94e5a..aa47e378 100644
--- a/src/file_consumer.cc
+++ b/src/file_consumer.cc
@@ -46,7 +46,7 @@ FileConsumer::FileConsumer(FileConsumerSharedState* shared_state,
 
 IndexFile* FileConsumer::TryConsumeFile(CXFile file,
                                         bool* is_first_ownership,
-                                        FileContentsMap* file_contents) {
+                                        FileContentsMap* file_contents_map) {
   assert(is_first_ownership);
 
   CXFileUniqueID file_id;
@@ -66,39 +66,24 @@ IndexFile* FileConsumer::TryConsumeFile(CXFile file,
   // No result in local; we need to query global.
   bool did_insert = shared_->Mark(file_name);
-  *is_first_ownership = did_insert;
-  local_[file_id] =
-      did_insert ?
MakeUnique( - file_name, GetFileContents(file_name, file_contents)) - : nullptr; - return local_[file_id].get(); -} - -IndexFile* FileConsumer::ForceLocal(CXFile file, - FileContentsMap* file_contents) { - // Try to fetch the file using the normal system, which will insert the file - // usage into global storage. - { - bool is_first; - IndexFile* cache = TryConsumeFile(file, &is_first, file_contents); - if (cache) - return cache; - } - - // It's already been taken before, just create a local copy. - CXFileUniqueID file_id; - if (clang_getFileUniqueID(file, &file_id) != 0) { - EmitError(file); + + // We did not take the file from global. Cache that we failed so we don't try + // again and return nullptr. + if (!did_insert) { + local_[file_id] = nullptr; return nullptr; } - auto it = local_.find(file_id); - if (it == local_.end() || !it->second) { - std::string file_name = FileName(file); - local_[file_id] = MakeUnique( - file_name, GetFileContents(file_name, file_contents)); + // Read the file contents, if we fail then we cannot index the file. + optional contents = GetFileContents(file_name, file_contents_map); + if (!contents) { + *is_first_ownership = false; + return nullptr; } - assert(local_.find(file_id) != local_.end()); + + // Build IndexFile instance. + *is_first_ownership = true; + local_[file_id] = MakeUnique(file_name, *contents); return local_[file_id].get(); } diff --git a/src/file_consumer.h b/src/file_consumer.h index 12e067a3..6ac94edd 100644 --- a/src/file_consumer.h +++ b/src/file_consumer.h @@ -50,12 +50,6 @@ struct FileConsumer { bool* is_first_ownership, FileContentsMap* file_contents); - // Forcibly create a local file, even if it has already been parsed. - // - // note: file_contents is passed as a parameter instead of as a member - // variable since it is large and we do not want to copy it. - IndexFile* ForceLocal(CXFile file, FileContentsMap* file_contents); - // Returns and passes ownership of all local state. std::vector> TakeLocalState(); diff --git a/src/iindexer.cc b/src/iindexer.cc index a2d4680a..befed64d 100644 --- a/src/iindexer.cc +++ b/src/iindexer.cc @@ -36,10 +36,10 @@ struct TestIndexer : IIndexer { std::vector> indexes; if (entry.num_indexes > 0) - indexes.push_back(MakeUnique(entry.path, nullopt)); + indexes.push_back(MakeUnique(entry.path, "")); for (int i = 1; i < entry.num_indexes; ++i) { indexes.push_back(MakeUnique( - entry.path + "_extra_" + std::to_string(i) + ".h", nullopt)); + entry.path + "_extra_" + std::to_string(i) + ".h", "")); } result->indexes.insert(std::make_pair(entry.path, std::move(indexes))); diff --git a/src/import_pipeline.cc b/src/import_pipeline.cc index 2c6c2091..823cb696 100644 --- a/src/import_pipeline.cc +++ b/src/import_pipeline.cc @@ -118,7 +118,7 @@ ShouldParse FileNeedsParse( TimestampManager* timestamp_manager, IModificationTimestampFetcher* modification_timestamp_fetcher, ImportManager* import_manager, - ICacheManager* cache_manager, + const std::shared_ptr& cache_manager, IndexFile* opt_previous_index, const std::string& path, const std::vector& args, @@ -144,7 +144,7 @@ ShouldParse FileNeedsParse( return ShouldParse::NoSuchFile; optional last_cached_modification = - timestamp_manager->GetLastCachedModificationTime(cache_manager, path); + timestamp_manager->GetLastCachedModificationTime(cache_manager.get(), path); // File has been changed. 
if (!last_cached_modification || @@ -180,7 +180,7 @@ CacheLoadResult TryLoadFromCache( TimestampManager* timestamp_manager, IModificationTimestampFetcher* modification_timestamp_fetcher, ImportManager* import_manager, - ICacheManager* cache_manager, + const std::shared_ptr& cache_manager, bool is_interactive, const Project::Entry& entry, const std::string& path_to_index) { @@ -237,7 +237,7 @@ CacheLoadResult TryLoadFromCache( PerformanceImportFile perf; std::vector result; - result.push_back(Index_DoIdMap(cache_manager->TakeOrLoad(path_to_index), perf, + result.push_back(Index_DoIdMap(cache_manager->TakeOrLoad(path_to_index), cache_manager, perf, is_interactive, false /*write_to_disk*/)); for (const std::string& dependency : previous_index->dependencies) { // Only load a dependency if it is not already loaded. @@ -258,7 +258,7 @@ CacheLoadResult TryLoadFromCache( if (!dependency_index) continue; - result.push_back(Index_DoIdMap(std::move(dependency_index), perf, + result.push_back(Index_DoIdMap(std::move(dependency_index), cache_manager, perf, is_interactive, false /*write_to_disk*/)); } @@ -267,12 +267,10 @@ CacheLoadResult TryLoadFromCache( } std::vector PreloadFileContents( - ICacheManager* cache_manager, + const std::shared_ptr& cache_manager, const Project::Entry& entry, const std::string& entry_contents, const std::string& path_to_index) { - FileContents contents(entry.filename, entry_contents); - // Load file contents for all dependencies into memory. If the dependencies // for the file changed we may not end up using all of the files we // preloaded. If a new dependency was added the indexer will grab the file @@ -284,31 +282,14 @@ std::vector PreloadFileContents( // TODO: We might be able to optimize perf by only copying for files in // working_files. We can pass that same set of files to the indexer as // well. We then default to a fast file-copy if not in working set. - bool loaded_primary = contents.path == path_to_index; - - std::vector file_contents = {contents}; + bool loaded_entry = false; + std::vector file_contents; cache_manager->IterateLoadedCaches([&](IndexFile* index) { - optional index_content = ReadContent(index->path); - if (!index_content) { - LOG_S(ERROR) << "Failed to load index content for " << index->path; - return; - } - - file_contents.push_back(FileContents(index->path, *index_content)); - - loaded_primary = loaded_primary || index->path == path_to_index; + file_contents.push_back(FileContents(index->path, index->file_contents)); + loaded_entry = loaded_entry || index->path == entry.filename; }); - - if (!loaded_primary) { - optional content = ReadContent(path_to_index); - if (!content) { - // Modification timestamp should have detected this already. - LOG_S(ERROR) << "Skipping index (file cannot be found): " - << path_to_index; - } else { - file_contents.push_back(FileContents(path_to_index, *content)); - } - } + if (!loaded_entry) + file_contents.push_back(FileContents(entry.filename, entry_contents)); return file_contents; } @@ -319,7 +300,6 @@ void ParseFile(Config* config, TimestampManager* timestamp_manager, IModificationTimestampFetcher* modification_timestamp_fetcher, ImportManager* import_manager, - ICacheManager* cache_manager, IIndexer* indexer, const Index_Request& request, const Project::Entry& entry) { @@ -328,7 +308,7 @@ void ParseFile(Config* config, // file is inferred, then try to use the file which originally imported it. 
std::string path_to_index = entry.filename; if (entry.is_inferred) { - IndexFile* entry_cache = cache_manager->TryLoad(entry.filename); + IndexFile* entry_cache = request.cache_manager->TryLoad(entry.filename); if (entry_cache) path_to_index = entry_cache->import_file; } @@ -336,14 +316,14 @@ void ParseFile(Config* config, // Try to load the file from cache. if (TryLoadFromCache(file_consumer_shared, timestamp_manager, modification_timestamp_fetcher, import_manager, - cache_manager, request.is_interactive, entry, + request.cache_manager, request.is_interactive, entry, path_to_index) == CacheLoadResult::DoNotParse) { return; } LOG_S(INFO) << "Parsing " << path_to_index; std::vector file_contents = PreloadFileContents( - cache_manager, entry, request.contents, path_to_index); + request.cache_manager, entry, request.contents, path_to_index); std::vector result; PerformanceImportFile perf; @@ -374,7 +354,7 @@ void ParseFile(Config* config, // When main thread does IdMap request it will request the previous index if // needed. LOG_S(INFO) << "Emitting index result for " << new_index->path; - result.push_back(Index_DoIdMap(std::move(new_index), perf, + result.push_back(Index_DoIdMap(std::move(new_index), request.cache_manager, perf, request.is_interactive, true /*write_to_disk*/)); } @@ -389,7 +369,6 @@ bool IndexMain_DoParse( TimestampManager* timestamp_manager, IModificationTimestampFetcher* modification_timestamp_fetcher, ImportManager* import_manager, - ICacheManager* cache_manager, IIndexer* indexer) { auto* queue = QueueManager::instance(); optional request = queue->index_request.TryDequeue(); @@ -400,13 +379,12 @@ bool IndexMain_DoParse( entry.filename = request->path; entry.args = request->args; ParseFile(config, working_files, file_consumer_shared, timestamp_manager, - modification_timestamp_fetcher, import_manager, cache_manager, + modification_timestamp_fetcher, import_manager, indexer, request.value(), entry); return true; } -bool IndexMain_DoCreateIndexUpdate(TimestampManager* timestamp_manager, - ICacheManager* cache_manager) { +bool IndexMain_DoCreateIndexUpdate(TimestampManager* timestamp_manager) { auto* queue = QueueManager::instance(); optional response = queue->on_id_mapped.TryDequeue(); if (!response) @@ -434,7 +412,7 @@ bool IndexMain_DoCreateIndexUpdate(TimestampManager* timestamp_manager, LOG_S(INFO) << "Writing cached index to disk for " << response->current->file->path; time.Reset(); - cache_manager->WriteToCache(*response->current->file); + response->cache_manager->WriteToCache(*response->current->file); response->perf.index_save_to_disk = time.ElapsedMicrosecondsAndReset(); timestamp_manager->UpdateCachedModificationTime( response->current->file->path, @@ -471,13 +449,13 @@ bool IndexMain_DoCreateIndexUpdate(TimestampManager* timestamp_manager, return true; } -bool IndexMain_LoadPreviousIndex(ICacheManager* cache_manager) { +bool IndexMain_LoadPreviousIndex() { auto* queue = QueueManager::instance(); optional response = queue->load_previous_index.TryDequeue(); if (!response) return false; - response->previous = cache_manager->TryTakeOrLoad(response->current->path); + response->previous = response->cache_manager->TryTakeOrLoad(response->current->path); LOG_IF_S(ERROR, !response->previous) << "Unable to load previous index for already imported index " << response->current->path; @@ -539,10 +517,12 @@ void IndexWithTuFromCodeCompletion( for (std::unique_ptr& new_index : *indexes) { Timer time; + std::shared_ptr cache_manager; + assert(false && "FIXME cache_manager"); // 
When main thread does IdMap request it will request the previous index if // needed. LOG_S(INFO) << "Emitting index result for " << new_index->path; - result.push_back(Index_DoIdMap(std::move(new_index), perf, + result.push_back(Index_DoIdMap(std::move(new_index), cache_manager, perf, true /*is_interactive*/, true /*write_to_disk*/)); } @@ -581,19 +561,16 @@ void Indexer_Main(Config* config, // IndexMain_DoCreateIndexUpdate so we don't starve querydb from doing any // work. Running both also lets the user query the partially constructed // index. - std::unique_ptr cache_manager = - ICacheManager::Make(config); did_work = IndexMain_DoParse( - config, working_files, file_consumer_shared, - timestamp_manager, &modification_timestamp_fetcher, - import_manager, cache_manager.get(), indexer.get()) || + config, working_files, file_consumer_shared, + timestamp_manager, &modification_timestamp_fetcher, + import_manager, indexer.get()) || + did_work; + + did_work = IndexMain_DoCreateIndexUpdate(timestamp_manager) || did_work; - did_work = IndexMain_DoCreateIndexUpdate(timestamp_manager, - cache_manager.get()) || - did_work; - - did_work = IndexMain_LoadPreviousIndex(cache_manager.get()) || did_work; + did_work = IndexMain_LoadPreviousIndex() || did_work; // Nothing to index and no index updates to create, so join some already // created index updates to reduce work on querydb thread. @@ -615,7 +592,6 @@ bool QueryDb_ImportMain(Config* config, ImportPipelineStatus* status, SemanticHighlightSymbolCache* semantic_cache, WorkingFiles* working_files) { - std::unique_ptr cache_manager = ICacheManager::Make(config); auto* queue = QueueManager::instance(); ActiveThread active_thread(config, status); @@ -653,7 +629,7 @@ bool QueryDb_ImportMain(Config* config, continue; } - Index_OnIdMapped response(request->perf, request->is_interactive, + Index_OnIdMapped response(request->cache_manager, request->perf, request->is_interactive, request->write_to_disk); Timer time; @@ -681,54 +657,35 @@ bool QueryDb_ImportMain(Config* config, did_work = true; Timer time; - - for (auto& updated_file : response->update.files_def_update) { - // TODO: We're reading a file on querydb thread. This is slow!! If this - // a real problem in practice we can load the file in a previous stage. - // It should be fine though because we only do it if the user has the - // file open. - WorkingFile* working_file = - working_files->GetFileByFilename(updated_file.path); - if (working_file) { - optional cached_file_contents = - cache_manager->LoadCachedFileContents(updated_file.path); - if (cached_file_contents) - working_file->SetIndexContent(*cached_file_contents); - else - working_file->SetIndexContent(working_file->buffer_content); - time.ResetAndPrint( - "Update WorkingFile index contents (via disk load) for " + - updated_file.path); - - // Update inactive region. - EmitInactiveLines(working_file, updated_file.inactive_regions); - } - } - - time.Reset(); db->ApplyIndexUpdate(&response->update); time.ResetAndPrint("Applying index update for " + StringJoinMap(response->update.files_def_update, [](const QueryFile::DefUpdate& value) { - return value.path; + return value.value.path; })); - // Update semantic highlighting. + // Update indexed content, inactive lines, and semantic highlighting. for (auto& updated_file : response->update.files_def_update) { WorkingFile* working_file = - working_files->GetFileByFilename(updated_file.path); + working_files->GetFileByFilename(updated_file.value.path); if (working_file) { + // Update indexed content. 
+ working_file->SetIndexContent(updated_file.file_content); + + // Inactive lines. + EmitInactiveLines(working_file, updated_file.value.inactive_regions); + + // Semantic highlighting. QueryFileId file_id = db->usr_to_file[NormalizedPath(working_file->filename)]; QueryFile* file = &db->files[file_id.id]; EmitSemanticHighlighting(db, semantic_cache, working_file, file); } - } - // Mark the files as being done in querydb stage after we apply the index - // update. - for (auto& updated_file : response->update.files_def_update) - import_manager->DoneQueryDbImport(updated_file.path); + // Mark the files as being done in querydb stage after we apply the index + // update. + import_manager->DoneQueryDbImport(updated_file.value.path); + } } return did_work; @@ -749,7 +706,7 @@ TEST_SUITE("ImportPipeline") { return IndexMain_DoParse(&config, &working_files, &file_consumer_shared, ×tamp_manager, &modification_timestamp_fetcher, &import_manager, - cache_manager.get(), indexer.get()); + indexer.get()); } void MakeRequest(const std::string& path, @@ -757,7 +714,7 @@ TEST_SUITE("ImportPipeline") { bool is_interactive = false, const std::string& contents = "void foo();") { queue->index_request.Enqueue( - Index_Request(path, args, is_interactive, contents)); + Index_Request(path, args, is_interactive, contents, cache_manager)); } MultiQueueWaiter querydb_waiter; @@ -771,7 +728,7 @@ TEST_SUITE("ImportPipeline") { TimestampManager timestamp_manager; FakeModificationTimestampFetcher modification_timestamp_fetcher; ImportManager import_manager; - std::unique_ptr cache_manager; + std::shared_ptr cache_manager; std::unique_ptr indexer; }; @@ -782,7 +739,7 @@ TEST_SUITE("ImportPipeline") { const std::vector& new_args = {}) { std::unique_ptr opt_previous_index; if (!old_args.empty()) { - opt_previous_index = MakeUnique("---.cc", nullopt); + opt_previous_index = MakeUnique("---.cc", ""); opt_previous_index->args = old_args; } optional from; @@ -790,7 +747,7 @@ TEST_SUITE("ImportPipeline") { from = std::string("---.cc"); return FileNeedsParse(is_interactive /*is_interactive*/, ×tamp_manager, &modification_timestamp_fetcher, - &import_manager, cache_manager.get(), + &import_manager, cache_manager, opt_previous_index.get(), file, new_args, from); }; diff --git a/src/indexer.cc b/src/indexer.cc index 47833949..d87d7b77 100644 --- a/src/indexer.cc +++ b/src/indexer.cc @@ -281,17 +281,6 @@ IndexFile* ConsumeFile(IndexParam* param, CXFile file) { << "Failed fetching modification time for " << file_name; if (modification_time) param->file_modification_times[file_name] = *modification_time; - - // Capture file contents in |param->file_contents| if it was not specified - // at the start of indexing. - if (db && !param->file_contents.count(file_name)) { - optional content = ReadContent(file_name); - if (content) - param->file_contents[file_name] = FileContents(file_name, *content); - else - LOG_S(ERROR) << "[indexer] Failed to read file content for " - << file_name; - } } } @@ -569,8 +558,8 @@ const int IndexFile::kMajorVersion = 10; const int IndexFile::kMinorVersion = 1; IndexFile::IndexFile(const std::string& path, - const optional& contents) - : id_cache(path), path(path), file_contents_(contents) { + const std::string& contents) + : id_cache(path), path(path), file_contents(contents) { // TODO: Reconsider if we should still be reusing the same id_cache. // Preallocate any existing resolved ids. 
  for (const auto& entry : id_cache.usr_to_type_id)
diff --git a/src/indexer.h b/src/indexer.h
index 3b963201..56eb1692 100644
--- a/src/indexer.h
+++ b/src/indexer.h
@@ -516,9 +516,9 @@ struct IndexFile {
   // Diagnostics found when indexing this file. Not serialized.
   std::vector<lsDiagnostic> diagnostics_;
   // File contents at the time of index. Not serialized.
-  optional<std::string> file_contents_;
+  std::string file_contents;
 
-  IndexFile(const std::string& path, const optional<std::string>& contents);
+  IndexFile(const std::string& path, const std::string& contents);
 
   IndexTypeId ToTypeId(Usr usr);
   IndexFuncId ToFuncId(Usr usr);
diff --git a/src/messages/cquery_freshen_index.cc b/src/messages/cquery_freshen_index.cc
index 4469fa4f..707e4156 100644
--- a/src/messages/cquery_freshen_index.cc
+++ b/src/messages/cquery_freshen_index.cc
@@ -37,7 +37,7 @@ struct CqueryFreshenIndexHandler : BaseMessageHandler<Ipc_CqueryFreshenIndex> {
     GroupMatch matcher(request->params.whitelist, request->params.blacklist);
 
     // Unmark all files whose timestamp has changed.
-    std::unique_ptr<ICacheManager> cache_manager = ICacheManager::Make(config);
+    std::shared_ptr<ICacheManager> cache_manager = ICacheManager::Make(config);
     std::queue<std::string> q;
     // |need_index| stores every filename ever enqueued.
@@ -98,7 +98,7 @@ struct CqueryFreshenIndexHandler : BaseMessageHandler<Ipc_CqueryFreshenIndex> {
       bool is_interactive =
          working_files->GetFileByFilename(entry.filename) != nullptr;
       queue->index_request.Enqueue(Index_Request(entry.filename, entry.args,
-                                                 is_interactive, *content));
+                                                 is_interactive, *content, ICacheManager::Make(config)));
     });
   }
 };
diff --git a/src/messages/cquery_index_file.cc b/src/messages/cquery_index_file.cc
index 4ee52f7b..f9889c5b 100644
--- a/src/messages/cquery_index_file.cc
+++ b/src/messages/cquery_index_file.cc
@@ -1,3 +1,4 @@
+#include "cache_manager.h"
 #include "message_handler.h"
 #include "platform.h"
 #include "queue_manager.h"
@@ -28,7 +29,7 @@ struct CqueryIndexFileHandler : BaseMessageHandler<Ipc_CqueryIndexFile> {
     LOG_S(INFO) << "Indexing file " << request->params.path;
     QueueManager::instance()->index_request.Enqueue(Index_Request(
         NormalizePath(request->params.path), request->params.args,
-        request->params.is_interactive, request->params.contents));
+        request->params.is_interactive, request->params.contents, ICacheManager::Make(config)));
   }
 };
 REGISTER_MESSAGE_HANDLER(CqueryIndexFileHandler);
diff --git a/src/messages/initialize.cc b/src/messages/initialize.cc
index 6c2c57d3..9b6aac7d 100644
--- a/src/messages/initialize.cc
+++ b/src/messages/initialize.cc
@@ -1,3 +1,4 @@
+#include "cache_manager.h"
 #include "import_pipeline.h"
 #include "include_complete.h"
 #include "message_handler.h"
@@ -129,7 +130,7 @@ struct lsServerCapabilities {
   lsTextDocumentSyncKind textDocumentSync =
       lsTextDocumentSyncKind::Incremental;
   // The server provides hover support.
-  bool hoverProvider = false;
+  bool hoverProvider = true;
   // The server provides completion support.
   lsCompletionOptions completionProvider;
   // The server provides signature help support.
@@ -621,7 +622,7 @@ struct InitializeHandler : BaseMessageHandler<Ipc_InitializeRequest> {
       bool is_interactive =
          working_files->GetFileByFilename(entry.filename) != nullptr;
       queue->index_request.Enqueue(Index_Request(
-          entry.filename, entry.args, is_interactive, *content, request->id));
+          entry.filename, entry.args, is_interactive, *content, ICacheManager::Make(config), request->id));
     });
 
     // We need to support multiple concurrent index processes.
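Aside: every handler above now hands a std::shared_ptr<ICacheManager> to Index_Request instead of letting each pipeline thread construct its own manager. The stand-alone sketch below illustrates the ownership model this buys; the CacheManager and Request types here are simplified stand-ins for ICacheManager and Index_Request, not the real cquery classes. The point is that the manager stays alive for as long as any queued request (or message derived from it) still references it, even after the handler's own reference is gone.

// Minimal ownership sketch, assuming toy stand-in types (C++14).
#include <cassert>
#include <memory>
#include <queue>
#include <string>

// Stand-in for ICacheManager.
struct CacheManager {
  explicit CacheManager(std::string dir) : cache_dir(std::move(dir)) {}
  std::string cache_dir;
};

// Stand-in for Index_Request: the request carries the cache manager that
// should be used when a worker thread eventually processes it.
struct Request {
  std::string path;
  std::shared_ptr<CacheManager> cache_manager;
};

int main() {
  std::queue<Request> index_requests;

  // The message handler creates the manager and attaches it to the request.
  auto manager = std::make_shared<CacheManager>("/tmp/example-cache");
  index_requests.push(Request{"foo.cc", manager});
  manager.reset();  // The handler's local reference can go away...

  // ...but the queued request still keeps the manager alive for the worker.
  Request r = index_requests.front();
  index_requests.pop();
  assert(r.cache_manager != nullptr);
  return 0;
}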
diff --git a/src/messages/text_document_did_open.cc b/src/messages/text_document_did_open.cc index c635dac4..1e3e091d 100644 --- a/src/messages/text_document_did_open.cc +++ b/src/messages/text_document_did_open.cc @@ -32,15 +32,13 @@ struct TextDocumentDidOpenHandler if (ShouldIgnoreFileForIndexing(path)) return; - std::unique_ptr cache_manager = ICacheManager::Make(config); + std::shared_ptr cache_manager = ICacheManager::Make(config); WorkingFile* working_file = working_files->OnOpen(request->params.textDocument); optional cached_file_contents = cache_manager->LoadCachedFileContents(path); if (cached_file_contents) working_file->SetIndexContent(*cached_file_contents); - else - working_file->SetIndexContent(working_file->buffer_content); QueryFile* file = nullptr; FindFileOrFail(db, project, nullopt, path, &file); @@ -60,7 +58,7 @@ struct TextDocumentDidOpenHandler const Project::Entry& entry = project->FindCompilationEntryForFile(path); QueueManager::instance()->index_request.PriorityEnqueue( Index_Request(entry.filename, entry.args, true /*is_interactive*/, - request->params.textDocument.text)); + request->params.textDocument.text, cache_manager)); } }; REGISTER_MESSAGE_HANDLER(TextDocumentDidOpenHandler); diff --git a/src/messages/text_document_did_save.cc b/src/messages/text_document_did_save.cc index b76ccbda..256b39c6 100644 --- a/src/messages/text_document_did_save.cc +++ b/src/messages/text_document_did_save.cc @@ -1,3 +1,4 @@ +#include "cache_manager.h" #include "clang_complete.h" #include "message_handler.h" #include "project.h" @@ -49,7 +50,7 @@ struct TextDocumentDidSaveHandler } else { Project::Entry entry = project->FindCompilationEntryForFile(path); QueueManager::instance()->index_request.Enqueue(Index_Request( - entry.filename, entry.args, true /*is_interactive*/, *content)); + entry.filename, entry.args, true /*is_interactive*/, *content, ICacheManager::Make(config))); } clang_complete->NotifySave(path); diff --git a/src/messages/workspace_did_change_watched_files.cc b/src/messages/workspace_did_change_watched_files.cc index 671676e2..a5f4a311 100644 --- a/src/messages/workspace_did_change_watched_files.cc +++ b/src/messages/workspace_did_change_watched_files.cc @@ -1,3 +1,4 @@ +#include "cache_manager.h" #include "clang_complete.h" #include "message_handler.h" #include "project.h" @@ -52,7 +53,7 @@ struct WorkspaceDidChangeWatchedFilesHandler LOG_S(ERROR) << "Unable to read file content after saving " << path; else { QueueManager::instance()->index_request.Enqueue( - Index_Request(path, entry.args, is_interactive, *content)); + Index_Request(path, entry.args, is_interactive, *content, ICacheManager::Make(config))); if (is_interactive) clang_complete->NotifySave(path); } @@ -60,7 +61,7 @@ struct WorkspaceDidChangeWatchedFilesHandler } case lsFileChangeType::Deleted: QueueManager::instance()->index_request.Enqueue( - Index_Request(path, entry.args, is_interactive, std::string())); + Index_Request(path, entry.args, is_interactive, std::string(), ICacheManager::Make(config))); break; } } diff --git a/src/query.cc b/src/query.cc index 8893b74b..dc52c034 100644 --- a/src/query.cc +++ b/src/query.cc @@ -197,7 +197,7 @@ void CompareGroups(std::vector& previous_data, } } -QueryFile::Def BuildFileDef(const IdMap& id_map, const IndexFile& indexed) { +QueryFile::DefUpdate BuildFileDefUpdate(const IdMap& id_map, const IndexFile& indexed) { QueryFile::Def def; def.path = indexed.path; def.includes = indexed.includes; @@ -288,7 +288,7 @@ QueryFile::Def BuildFileDef(const IdMap& 
id_map, const IndexFile& indexed) { return a.loc.range.start < b.loc.range.start; }); - return def; + return QueryFile::DefUpdate(def, indexed.file_contents); } inline optional GetQueryFileIdFromPath(QueryDatabase* query_db, @@ -502,7 +502,7 @@ IndexUpdate IndexUpdate::CreateDelta(const IdMap* previous_id_map, if (!previous_id_map) { assert(!previous); - IndexFile empty(current->path, nullopt); + IndexFile empty(current->path, ""); return IndexUpdate(*current_id_map, *current_id_map, empty, *current); } return IndexUpdate(*previous_id_map, *current_id_map, *previous, *current); @@ -531,7 +531,7 @@ IndexUpdate::IndexUpdate(const IdMap& previous_id_map, } \ } // File - files_def_update.push_back(BuildFileDef(current_id_map, current_file)); + files_def_update.push_back(BuildFileDefUpdate(current_id_map, current_file)); // **NOTE** We only remove entries if they were defined in the previous index. // For example, if a type is included from another file it will be defined @@ -850,14 +850,14 @@ void QueryDatabase::ImportOrUpdate( // This function runs on the querydb thread. for (auto& def : updates) { - auto it = usr_to_file.find(NormalizedPath(def.path)); + auto it = usr_to_file.find(NormalizedPath(def.value.path)); assert(it != usr_to_file.end()); QueryFile& existing = files[it->second.id]; - existing.def = def; + existing.def = def.value; UpdateDetailedNames(&existing.detailed_name_idx, SymbolKind::File, - it->second.id, def.path, def.path); + it->second.id, def.value.path, def.value.path); } } @@ -963,8 +963,8 @@ TEST_SUITE("query") { } TEST_CASE("remove defs") { - IndexFile previous("foo.cc", nullopt); - IndexFile current("foo.cc", nullopt); + IndexFile previous("foo.cc", ""); + IndexFile current("foo.cc", ""); previous.Resolve(previous.ToTypeId(HashUsr("usr1"))) ->def.definition_spelling = Range(Position(1, 0)); @@ -981,8 +981,8 @@ TEST_SUITE("query") { } TEST_CASE("do not remove ref-only defs") { - IndexFile previous("foo.cc", nullopt); - IndexFile current("foo.cc", nullopt); + IndexFile previous("foo.cc", ""); + IndexFile current("foo.cc", ""); previous.Resolve(previous.ToTypeId(HashUsr("usr1"))) ->uses.push_back(Range(Position(1, 0))); @@ -1000,8 +1000,8 @@ TEST_SUITE("query") { } TEST_CASE("func callers") { - IndexFile previous("foo.cc", nullopt); - IndexFile current("foo.cc", nullopt); + IndexFile previous("foo.cc", ""); + IndexFile current("foo.cc", ""); IndexFunc* pf = previous.Resolve(previous.ToFuncId(HashUsr("usr"))); IndexFunc* cf = current.Resolve(current.ToFuncId(HashUsr("usr"))); @@ -1025,8 +1025,8 @@ TEST_SUITE("query") { } TEST_CASE("type usages") { - IndexFile previous("foo.cc", nullopt); - IndexFile current("foo.cc", nullopt); + IndexFile previous("foo.cc", ""); + IndexFile current("foo.cc", ""); IndexType* pt = previous.Resolve(previous.ToTypeId(HashUsr("usr"))); IndexType* ct = current.Resolve(current.ToTypeId(HashUsr("usr"))); @@ -1046,8 +1046,8 @@ TEST_SUITE("query") { } TEST_CASE("apply delta") { - IndexFile previous("foo.cc", nullopt); - IndexFile current("foo.cc", nullopt); + IndexFile previous("foo.cc", ""); + IndexFile current("foo.cc", ""); IndexFunc* pf = previous.Resolve(previous.ToFuncId(HashUsr("usr"))); IndexFunc* cf = current.Resolve(current.ToFuncId(HashUsr("usr"))); diff --git a/src/query.h b/src/query.h index ea92733a..daee2aff 100644 --- a/src/query.h +++ b/src/query.h @@ -176,6 +176,21 @@ void Reflect(TVisitor& visitor, WithUsr& value) { REFLECT_MEMBER_END(); } +template +struct WithFileContent { + T value; + std::string file_content; + + 
WithFileContent(const T& value, const std::string& file_content) : value(value), file_content(file_content) {} +}; +template +void Reflect(TVisitor& visitor, WithFileContent& value) { + REFLECT_MEMBER_START(); + REFLECT_MEMBER(value); + REFLECT_MEMBER(file_content); + REFLECT_MEMBER_END(); +} + struct QueryFile { struct Def { std::string path; @@ -193,13 +208,13 @@ struct QueryFile { std::vector dependencies; }; - using DefUpdate = Def; + using DefUpdate = WithFileContent; - optional def; + optional def; size_t detailed_name_idx = (size_t)-1; explicit QueryFile(const std::string& path) { - def = DefUpdate(); + def = Def(); def->path = path; } }; diff --git a/src/queue_manager.cc b/src/queue_manager.cc index 8f52b41b..c9308cad 100644 --- a/src/queue_manager.cc +++ b/src/queue_manager.cc @@ -1,5 +1,6 @@ #include "queue_manager.h" +#include "cache_manager.h" #include "language_server_api.h" #include "query.h" @@ -9,18 +10,22 @@ Index_Request::Index_Request(const std::string& path, const std::vector& args, bool is_interactive, const std::string& contents, + const std::shared_ptr& cache_manager, lsRequestId id) : path(path), args(args), is_interactive(is_interactive), contents(contents), + cache_manager(cache_manager), id(id) {} Index_DoIdMap::Index_DoIdMap(std::unique_ptr current, + const std::shared_ptr& cache_manager, PerformanceImportFile perf, bool is_interactive, bool write_to_disk) : current(std::move(current)), + cache_manager(cache_manager), perf(perf), is_interactive(is_interactive), write_to_disk(write_to_disk) { @@ -31,10 +36,12 @@ Index_OnIdMapped::File::File(std::unique_ptr file, std::unique_ptr ids) : file(std::move(file)), ids(std::move(ids)) {} -Index_OnIdMapped::Index_OnIdMapped(PerformanceImportFile perf, +Index_OnIdMapped::Index_OnIdMapped(const std::shared_ptr& cache_manager, + PerformanceImportFile perf, bool is_interactive, bool write_to_disk) - : perf(perf), + : cache_manager(cache_manager), + perf(perf), is_interactive(is_interactive), write_to_disk(write_to_disk) {} diff --git a/src/queue_manager.h b/src/queue_manager.h index 0eaeffeb..ddd769d3 100644 --- a/src/queue_manager.h +++ b/src/queue_manager.h @@ -7,6 +7,7 @@ #include +struct ICacheManager; struct lsBaseOutMessage; struct Stdout_Request { @@ -19,19 +20,23 @@ struct Index_Request { // TODO: make |args| a string that is parsed lazily. std::vector args; bool is_interactive; - std::string contents; // Preloaded contents. Useful for tests. + std::string contents; // Preloaded contents. 
+ std::shared_ptr cache_manager; lsRequestId id; + Index_Request(const std::string& path, const std::vector& args, bool is_interactive, const std::string& contents, + const std::shared_ptr& cache_manager, lsRequestId id = {}); }; struct Index_DoIdMap { std::unique_ptr current; std::unique_ptr previous; + std::shared_ptr cache_manager; PerformanceImportFile perf; bool is_interactive = false; @@ -39,6 +44,7 @@ struct Index_DoIdMap { bool load_previous = false; Index_DoIdMap(std::unique_ptr current, + const std::shared_ptr& cache_manager, PerformanceImportFile perf, bool is_interactive, bool write_to_disk); @@ -54,12 +60,14 @@ struct Index_OnIdMapped { std::unique_ptr previous; std::unique_ptr current; + std::shared_ptr cache_manager; PerformanceImportFile perf; bool is_interactive; bool write_to_disk; - Index_OnIdMapped(PerformanceImportFile perf, + Index_OnIdMapped(const std::shared_ptr& cache_manager, + PerformanceImportFile perf, bool is_interactive, bool write_to_disk); }; diff --git a/src/serializer.cc b/src/serializer.cc index 7841bb7d..1887758f 100644 --- a/src/serializer.cc +++ b/src/serializer.cc @@ -300,26 +300,30 @@ std::string Serialize(SerializeFormat format, IndexFile& file) { std::unique_ptr Deserialize(SerializeFormat format, const std::string& path, - const std::string& serialized, + const std::string& serialized_index_content, + const std::string& file_content, optional expected_version) { + if (serialized_index_content.empty()) + return nullptr; + std::unique_ptr file; switch (format) { case SerializeFormat::Json: { rapidjson::Document reader; if (gTestOutputMode) - reader.Parse(serialized.c_str()); + reader.Parse(serialized_index_content.c_str()); else { - const char* p = strchr(serialized.c_str(), '\n'); + const char* p = strchr(serialized_index_content.c_str(), '\n'); if (!p) return nullptr; - if (expected_version && atoi(serialized.c_str()) != *expected_version) + if (expected_version && atoi(serialized_index_content.c_str()) != *expected_version) return nullptr; reader.Parse(p + 1); } if (reader.HasParseError()) return nullptr; - file = MakeUnique(path, nullopt); + file = MakeUnique(path, file_content); JsonReader json_reader{&reader}; try { Reflect(json_reader, *file); @@ -332,17 +336,15 @@ std::unique_ptr Deserialize(SerializeFormat format, } case SerializeFormat::MessagePack: { - if (serialized.empty()) - return nullptr; try { int major, minor; - if (serialized.size() < 8) + if (serialized_index_content.size() < 8) throw std::invalid_argument("Invalid"); msgpack::unpacker upk; - upk.reserve_buffer(serialized.size()); - memcpy(upk.buffer(), serialized.data(), serialized.size()); - upk.buffer_consumed(serialized.size()); - file = MakeUnique(path, nullopt); + upk.reserve_buffer(serialized_index_content.size()); + memcpy(upk.buffer(), serialized_index_content.data(), serialized_index_content.size()); + upk.buffer_consumed(serialized_index_content.size()); + file = MakeUnique(path, file_content); MessagePackReader reader(&upk); Reflect(reader, major); Reflect(reader, minor); diff --git a/src/serializer.h b/src/serializer.h index 92f9c8ee..fee42c65 100644 --- a/src/serializer.h +++ b/src/serializer.h @@ -346,7 +346,8 @@ void ReflectMember(Reader& visitor, const char* name, T& value) { std::string Serialize(SerializeFormat format, IndexFile& file); std::unique_ptr Deserialize(SerializeFormat format, const std::string& path, - const std::string& serialized, + const std::string& serialized_index_content, + const std::string& file_content, optional expected_version); void 
SetTestOutputMode();
diff --git a/src/test.cc b/src/test.cc
index 70574da4..3ddd8183 100644
--- a/src/test.cc
+++ b/src/test.cc
@@ -110,7 +110,7 @@ void DiffDocuments(std::string path,
 void VerifySerializeToFrom(IndexFile* file) {
   std::string expected = file->ToString();
   std::unique_ptr<IndexFile> result = Deserialize(
-      SerializeFormat::Json, "--.cc", Serialize(SerializeFormat::Json, *file),
+      SerializeFormat::Json, "--.cc", Serialize(SerializeFormat::Json, *file), "",
       nullopt /*expected_version*/);
   std::string actual = result->ToString();
   if (expected != actual) {
diff --git a/src/utils.cc b/src/utils.cc
index 430c2bcc..84bdca07 100644
--- a/src/utils.cc
+++ b/src/utils.cc
@@ -302,6 +302,7 @@ bool FileExists(const std::string& filename) {
 }
 
 optional<std::string> ReadContent(const std::string& filename) {
+  LOG_S(INFO) << "Reading " << filename;
   std::ifstream cache;
   cache.open(filename);
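Note on the new Deserialize() contract used above: the serialized index blob and the raw file content are now separate parameters, and an empty blob short-circuits to nullptr before any parsing. The snippet below is a self-contained stand-in (FakeIndexFile and FakeDeserialize are illustrative, not the real serializer.cc API) that mirrors only those guards: empty payload or a version mismatch yields nullptr, otherwise the raw file content is attached to the returned index object.

// Minimal sketch of the guard logic, assuming stand-in types (C++14).
#include <cassert>
#include <cstdlib>
#include <memory>
#include <string>

// Simplified stand-in for IndexFile: only the fields needed for the example.
struct FakeIndexFile {
  std::string path;
  std::string file_contents;
};

// Mirrors the added guards: no usable cache is signalled by nullptr.
std::unique_ptr<FakeIndexFile> FakeDeserialize(
    const std::string& path,
    const std::string& serialized_index_content,
    const std::string& file_content,
    int expected_version) {
  if (serialized_index_content.empty())
    return nullptr;
  if (atoi(serialized_index_content.c_str()) != expected_version)
    return nullptr;
  auto file = std::make_unique<FakeIndexFile>();
  file->path = path;
  file->file_contents = file_content;  // raw contents travel with the index
  return file;
}

int main() {
  assert(FakeDeserialize("foo.cc", "", "int x;", 10) == nullptr);      // empty blob
  assert(FakeDeserialize("foo.cc", "9\n{}", "int x;", 10) == nullptr); // old version
  auto ok = FakeDeserialize("foo.cc", "10\n{}", "int x;", 10);
  assert(ok && ok->file_contents == "int x;");
  return 0;
}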