mirror of https://github.com/MaskRay/ccls.git

Fix some issues in import pipeline.

commit 145975df6b
parent 87394de1cf

@@ -750,8 +750,10 @@ std::vector<Index_DoIdMap> DoParseFile(
     const optional<FileContents>& contents) {
   std::vector<Index_DoIdMap> result;
 
+  // Always run this block, even if we are interactive, so we can check
+  // dependencies and reset files in |file_consumer_shared|.
   IndexFile* previous_index = cache_loader->TryLoad(path);
-  if (previous_index && !is_interactive) {
+  if (previous_index) {
     // If none of the dependencies have changed and the index is not
     // interactive (ie, requested by a file save), skip parsing and just load
     // from cache.
@@ -762,8 +764,10 @@ std::vector<Index_DoIdMap> DoParseFile(
   auto file_needs_parse = [&](const std::string& path, bool is_dependency) {
     // If the file is a dependency but another file as already imported it,
     // don't bother.
-    if (is_dependency && !import_manager->TryMarkDependencyImported(path))
+    if (!is_interactive && is_dependency &&
+        !import_manager->TryMarkDependencyImported(path)) {
       return FileParseQuery::DoesNotNeedParse;
+    }
 
     optional<int64_t> modification_timestamp = GetLastModificationTime(path);
     if (!modification_timestamp)
@@ -786,7 +790,8 @@ std::vector<Index_DoIdMap> DoParseFile(
     FileParseQuery path_state = file_needs_parse(path, false /*is_dependency*/);
     if (path_state == FileParseQuery::BadFile)
       return result;
-    bool needs_reparse = path_state == FileParseQuery::NeedsParse;
+    bool needs_reparse =
+        is_interactive || path_state == FileParseQuery::NeedsParse;
 
     for (const std::string& dependency : previous_index->dependencies) {
       assert(!dependency.empty());
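
Taken together, the three DoParseFile hunks above make an interactive request (one triggered by a file save) always reparse: the cached-index block now runs unconditionally so dependencies can still be checked and |file_consumer_shared| reset, the dependency-import shortcut is bypassed when interactive, and needs_reparse is forced to true. A minimal sketch of the resulting decision, with simplified names (the real function also handles BadFile, dependency walks, and cache loading):

    // Sketch only: distills the reparse decision after this commit.
    enum class FileParseQuery { NeedsParse, DoesNotNeedParse, BadFile };

    bool ShouldReparse(bool is_interactive, FileParseQuery path_state) {
      // Interactive requests (file saves) reparse even when timestamps say
      // the file on disk is unchanged.
      return is_interactive || path_state == FileParseQuery::NeedsParse;
    }
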
@@ -950,8 +955,6 @@ bool IndexMain_DoCreateIndexUpdate(Config* config,
   IdMap* previous_id_map = nullptr;
   IndexFile* previous_index = nullptr;
   if (response->previous) {
-    LOG_S(INFO) << "Creating delta update for "
-                << response->previous->file->path;
     previous_id_map = response->previous->ids.get();
     previous_index = response->previous->file.get();
   }
@@ -961,6 +964,8 @@ bool IndexMain_DoCreateIndexUpdate(Config* config,
       IndexUpdate::CreateDelta(previous_id_map, response->current->ids.get(),
                                previous_index, response->current->file.get());
   response->perf.index_make_delta = time.ElapsedMicrosecondsAndReset();
+  LOG_S(INFO) << "Built index update for " << response->current->file->path
+              << " (is_delta=" << !!response->previous << ")";
 
   // Write current index to disk if requested.
   if (response->write_to_disk) {
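
The !!response->previous in the new log line is the usual double-negation idiom: it collapses the smart pointer to a bool so the stream prints 1 or 0 rather than a raw address. For example:

    #include <iostream>
    #include <memory>

    int main() {
      std::unique_ptr<int> previous(new int(42));
      std::cout << "is_delta=" << !!previous << "\n";  // prints "is_delta=1"
      previous.reset();
      std::cout << "is_delta=" << !!previous << "\n";  // prints "is_delta=0"
    }
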
@@ -1102,11 +1107,6 @@ bool QueryDb_ImportMain(Config* config,
       break;
     did_work = true;
 
-    // Check if the file is already being imported into querydb. If it is, drop
-    // the request.
-    if (!import_manager->StartQueryDbImport(request->current->path))
-      continue;
-
     // If the request does not have previous state and we have already imported
     // it, load the previous state from disk and rerun IdMap logic later. Do not
     // do this if we have already attempted in the past.
@@ -1119,6 +1119,17 @@ bool QueryDb_ImportMain(Config* config,
       continue;
     }
 
+    // Check if the file is already being imported into querydb. If it is, drop
+    // the request.
+    //
+    // Note, we must do this *after* we have checked for the previous index,
+    // otherwise we will never actually generate the IdMap.
+    if (!import_manager->StartQueryDbImport(request->current->path)) {
+      LOG_S(INFO) << "Dropping index as it is already being imported for "
+                  << request->current->path;
+      continue;
+    }
+
     Index_OnIdMapped response(request->perf, request->is_interactive,
                               request->write_to_disk);
     Timer time;
@@ -1168,10 +1179,6 @@ bool QueryDb_ImportMain(Config* config,
           "Update WorkingFile index contents (via disk load) for " +
               updated_file.path);
     }
-
-    // PERF: This will acquire a lock. If querydb ends being up being slow we
-    // could push this request to another queue which runs on an indexer.
-    import_manager->DoneQueryDbImport(updated_file.path);
   }
 
   time.Reset();
@@ -1181,6 +1188,11 @@ bool QueryDb_ImportMain(Config* config,
         [](const QueryFile::DefUpdate& value) {
           return value.path;
         }));
+
+    // Mark the files as being done in querydb stage after we apply the index
+    // update.
+    for (auto& updated_file : response->update.files_def_update)
+      import_manager->DoneQueryDbImport(updated_file.path);
   }
 
   return did_work;
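
The four QueryDb_ImportMain hunks above reposition the import guard. StartQueryDbImport now runs after the previous-index check (per the added comment, claiming earlier meant the IdMap was never generated), and the matching DoneQueryDbImport moves from the working-file refresh down to after the index update is applied, so the guard brackets the entire querydb mutation. A sketch of the claim/release pattern this implies, assuming ImportManager is essentially a mutex-guarded set of in-flight paths (the real type is defined elsewhere in cquery):

    #include <mutex>
    #include <string>
    #include <unordered_set>

    struct ImportManagerSketch {
      std::mutex mutex;
      std::unordered_set<std::string> in_progress;

      // Claim |path|; returns false if another import already holds it.
      bool StartQueryDbImport(const std::string& path) {
        std::lock_guard<std::mutex> lock(mutex);
        return in_progress.insert(path).second;
      }
      // Release |path| only once the update is visible in querydb.
      void DoneQueryDbImport(const std::string& path) {
        std::lock_guard<std::mutex> lock(mutex);
        in_progress.erase(path);
      }
    };
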
@@ -1294,7 +1306,7 @@ bool QueryDbMainLoop(Config* config,
   std::cerr << "[querydb] Starting " << config->indexerCount
             << " indexers" << std::endl;
   for (int i = 0; i < config->indexerCount; ++i) {
-    WorkThread::StartThread("indexer" + std::to_string(i), [&]() {
+    WorkThread::StartThread("indexer" + std::to_string(i), [=]() {
      return IndexMain(config, file_consumer_shared, timestamp_manager,
                       import_manager, project, working_files, waiter,
                       queue);
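
The one-character capture change above ([&] to [=]) matters because WorkThread::StartThread runs the lambda on a new thread that can outlive the loop iteration. A minimal illustration of the hazard, using std::thread and a hypothetical Work() as stand-ins for StartThread and IndexMain:

    #include <iostream>
    #include <string>
    #include <thread>
    #include <vector>

    void Work(const std::string& name) { std::cout << name << "\n"; }

    void SpawnIndexers(int indexer_count) {
      std::vector<std::thread> threads;
      for (int i = 0; i < indexer_count; ++i) {
        // With [&], every lambda would share a reference to |i|, which the
        // loop mutates and then destroys while the threads may still read it.
        // With [=], each lambda copies |i| (and any captured pointers), so
        // each thread owns a stable value.
        threads.emplace_back([=]() { Work("indexer" + std::to_string(i)); });
      }
      for (auto& t : threads)
        t.join();
    }
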
@@ -1361,8 +1373,8 @@ bool QueryDbMainLoop(Config* config,
           lsSignatureHelpOptions();
       // NOTE: If updating signature help tokens make sure to also update
       // WorkingFile::FindClosestCallNameInBuffer.
-      response.result.capabilities.signatureHelpProvider
-          ->triggerCharacters = {"(", ","};
+      response.result.capabilities.signatureHelpProvider->triggerCharacters =
+          {"(", ","};
 
       response.result.capabilities.codeLensProvider = lsCodeLensOptions();
       response.result.capabilities.codeLensProvider->resolveProvider = false;
@@ -1393,6 +1405,28 @@ bool QueryDbMainLoop(Config* config,
 
     case IpcId::CqueryFreshenIndex: {
       LOG_S(INFO) << "Freshening " << project->entries.size() << " files";
+
+      // TODO: think about this flow and test it more.
+
+      // Unmark all files whose timestamp has changed.
+      CacheLoader cache_loader(config);
+      for (const auto& file : db->files) {
+        if (!file)
+          continue;
+
+        optional<int64_t> modification_timestamp =
+            GetLastModificationTime(file->def.path);
+        if (!modification_timestamp)
+          continue;
+
+        optional<int64_t> cached_modification =
+            timestamp_manager->GetLastCachedModificationTime(&cache_loader,
+                                                             file->def.path);
+        if (modification_timestamp != cached_modification)
+          file_consumer_shared->Reset(file->def.path);
+      }
+
+      // Send index requests for every file.
       project->ForAllFilteredFiles(
           config, [&](int i, const Project::Entry& entry) {
             LOG_S(INFO) << "[" << i << "/" << (project->entries.size() - 1)
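
The freshen handler added above reduces to a per-file timestamp comparison: when the on-disk modification time no longer matches the cached one, the file's deduplication state is reset so the index requests sent afterwards actually re-index it. Distilled into a hypothetical predicate (std::optional here for self-containment; cquery used its own optional<> at the time):

    #include <cstdint>
    #include <optional>

    // Illustrative only: true when the cache entry is stale and the file
    // should be unmarked before re-indexing.
    bool IsStale(std::optional<int64_t> disk_mtime,
                 std::optional<int64_t> cached_mtime) {
      // Files without a readable on-disk timestamp are skipped entirely.
      return disk_mtime.has_value() && disk_mtime != cached_mtime;
    }
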
@@ -2980,8 +3014,8 @@ int main(int argc, char** argv) {
     return 0;
   } else if (HasOption(options, "--language-server")) {
     // std::cerr << "Running language server" << std::endl;
-    Config config;
-    LanguageServerMain(argv[0], &config, &waiter);
+    auto config = MakeUnique<Config>();
+    LanguageServerMain(argv[0], config.get(), &waiter);
     return 0;
   } else {
     std::cout << R"help(cquery help:
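
MakeUnique in the final hunk is presumably cquery's local helper predating std::make_unique (which arrived only in C++14), so the change moves Config from the stack onto the heap behind a unique_ptr. A minimal stand-in with the assumed semantics:

    #include <memory>
    #include <utility>

    // Assumed shape of the MakeUnique<T>(...) helper used above, mirroring
    // std::make_unique for pre-C++14 code.
    template <typename T, typename... Args>
    std::unique_ptr<T> MakeUnique(Args&&... args) {
      return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
    }
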