Progress output improvements

- Don't emit so many progress messages
- Allow user to control how often progress is emitted
- Include number of active threads in progress
Jacob Dufault 2018-01-07 13:06:18 -08:00
parent 7f6f63b702
commit 09d9d5eedc
7 changed files with 62 additions and 15 deletions
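As an illustration of the last bullet, a notification after this change carries the new activeThreads field alongside the existing queue counts. The snippet below is only a sketch written as a tiny self-contained C++ program: the method name and field names come from the Out_Progress changes in this commit, but the counts are invented and the JSON-RPC Content-Length framing is omitted.

// Illustrative only: approximate shape of a "$cquery/progress" notification
// after this commit. Counts are made up; the real message is produced by the
// lsOutMessage / MAKE_REFLECT_STRUCT machinery in language_server_api.h.
#include <iostream>

int main() {
  const char* example =
      R"({"jsonrpc":"2.0","method":"$cquery/progress","params":{)"
      R"("indexRequestCount":12,"doIdMapCount":0,"loadPreviousIndexCount":0,)"
      R"("onIdMappedCount":1,"onIndexedCount":3,"activeThreads":4}})";
  std::cout << example << "\n";
}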


@@ -100,6 +100,7 @@ bool QueryDbMainLoop(Config* config,
                      Project* project,
                      FileConsumerSharedState* file_consumer_shared,
                      ImportManager* import_manager,
+                     ImportPipelineStatus* status,
                      TimestampManager* timestamp_manager,
                      SemanticHighlightSymbolCache* semantic_cache,
                      WorkingFiles* working_files,
@@ -132,7 +133,7 @@ bool QueryDbMainLoop(Config* config,
   // TODO: consider rate-limiting and checking for IPC messages so we don't
   // block requests / we can serve partial requests.
-  if (QueryDb_ImportMain(config, db, import_manager, semantic_cache,
+  if (QueryDb_ImportMain(config, db, import_manager, status, semantic_cache,
                          working_files)) {
     did_work = true;
   }
@@ -190,10 +191,11 @@ void RunQueryDbThread(const std::string& bin_name,
   SetCurrentThreadName("querydb");
   while (true) {
     bool did_work = QueryDbMainLoop(
-        config, &db, querydb_waiter, &project, &file_consumer_shared, &import_manager,
-        &timestamp_manager, &semantic_cache, &working_files, &clang_complete,
-        &include_complete, global_code_complete_cache.get(),
-        non_global_code_complete_cache.get(), signature_cache.get());
+        config, &db, querydb_waiter, &project, &file_consumer_shared,
+        &import_manager, &import_pipeline_status, &timestamp_manager,
+        &semantic_cache, &working_files, &clang_complete, &include_complete,
+        global_code_complete_cache.get(), non_global_code_complete_cache.get(),
+        signature_cache.get());

     // Cleanup and free any unused memory.
     FreeUnusedMemory();


@@ -39,7 +39,17 @@ struct Config {
   bool enableCacheRead = true;

-  // If true, cquery will send progress reports while indexing
-  bool enableProgressReports = true;
+  // How often should cquery send progress report messages?
+  //  -1: never
+  //   0: as often as possible
+  //  xxx: at most every xxx milliseconds
+  //
+  // Empty progress reports (ie, idle) are delivered as often as they are
+  // available and may exceed this value.
+  //
+  // This does not guarantee a progress report will be delivered every
+  // interval; it could take significantly longer if cquery is completely idle.
+  int progressReportFrequencyMs = 500;

   // If true, document links are reported for #include directives.
   bool showDocumentLinksOnIncludes = true;
@@ -94,7 +104,7 @@ MAKE_REFLECT_STRUCT(Config,
                     enableIndexing,
                     enableCacheWrite,
                     enableCacheRead,
-                    enableProgressReports,
+                    progressReportFrequencyMs,
                     includeCompletionMaximumPathLength,
                     includeCompletionWhitelistLiteralEnding,
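To make the three value classes concrete, here is a minimal standalone sketch of the gating rule, mirroring the EmitProgress() change in the next file. ShouldEmit and its parameters are illustrative names only, not cquery APIs.

// Illustrative sketch of the progressReportFrequencyMs semantics; the real
// logic is EmitProgress() in import_pipeline.cc. `has_state` is true when
// any queue count or activeThreads is non-zero.
#include <iostream>

bool ShouldEmit(int frequency_ms, bool has_state,
                long long now_ms, long long& next_output_ms) {
  if (frequency_ms < 0)   // -1: never send progress reports.
    return false;
  if (frequency_ms == 0)  //  0: send as often as possible.
    return true;
  // N > 0: rate-limit to at most one non-empty report every N milliseconds.
  if (!has_state || now_ms < next_output_ms)
    return false;
  next_output_ms = now_ms + frequency_ms;
  return true;
}

int main() {
  long long next = 0;
  std::cout << ShouldEmit(500, true, 0, next);    // 1: first report goes out
  std::cout << ShouldEmit(500, true, 100, next);  // 0: throttled, within 500ms
  std::cout << ShouldEmit(500, true, 600, next);  // 1: interval has elapsed
  std::cout << "\n";                              // prints "101"
}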


@@ -16,15 +16,27 @@
 #include <doctest/doctest.h>
 #include <loguru.hpp>

+#include <atomic>
+#include <chrono>
 #include <memory>
 #include <string>
 #include <vector>

 namespace {

+long long GetCurrentTimeInMilliseconds() {
+  auto time_since_epoch = Timer::Clock::now().time_since_epoch();
+  long long elapsed_milliseconds =
+      std::chrono::duration_cast<std::chrono::milliseconds>(time_since_epoch)
+          .count();
+  return elapsed_milliseconds;
+}
+
 // Send indexing progress to client if reporting is enabled.
-void EmitProgress(Config* config) {
-  if (config->enableProgressReports) {
+void EmitProgress(Config* config, ImportPipelineStatus* status) {
+  static std::atomic<long long> next_output = 0;
+
+  if (config->progressReportFrequencyMs >= 0) {
     auto* queue = QueueManager::instance();
     Out_Progress out;
     out.params.indexRequestCount = queue->index_request.Size();
@@ -32,6 +44,21 @@ void EmitProgress(Config* config) {
     out.params.loadPreviousIndexCount = queue->load_previous_index.Size();
     out.params.onIdMappedCount = queue->on_id_mapped.Size();
     out.params.onIndexedCount = queue->on_indexed.Size();
+    out.params.activeThreads = status->num_active_threads;
+
+    // Ignore this progress update if the last update was too recent.
+    if (config->progressReportFrequencyMs != 0) {
+      // Make sure we output a status update if queue lengths are zero.
+      bool has_state =
+          out.params.indexRequestCount != 0 || out.params.doIdMapCount != 0 ||
+          out.params.loadPreviousIndexCount != 0 ||
+          out.params.onIdMappedCount != 0 || out.params.onIndexedCount != 0 ||
+          out.params.activeThreads != 0;
+      if (!has_state || GetCurrentTimeInMilliseconds() < next_output)
+        return;
+      next_output =
+          GetCurrentTimeInMilliseconds() + config->progressReportFrequencyMs;
+    }

     QueueManager::WriteStdout(IpcId::Unknown, out);
   }
@@ -428,7 +455,7 @@ void Indexer_Main(Config* config,
   while (true) {
     status->num_active_threads++;

-    EmitProgress(config);
+    EmitProgress(config, status);

     // TODO: process all off IndexMain_DoIndex before calling
     // IndexMain_DoCreateIndexUpdate for better icache behavior. We need to have
@@ -467,11 +494,14 @@ void Indexer_Main(Config* config,
 bool QueryDb_ImportMain(Config* config,
                         QueryDatabase* db,
                         ImportManager* import_manager,
+                        ImportPipelineStatus* status,
                         SemanticHighlightSymbolCache* semantic_cache,
                         WorkingFiles* working_files) {
   std::unique_ptr<ICacheManager> cache_manager = ICacheManager::Make(config);
   auto* queue = QueueManager::instance();
-  EmitProgress(config);
+  EmitProgress(config, status);
+
+  status->num_active_threads++;

   bool did_work = false;
@@ -584,6 +614,8 @@ bool QueryDb_ImportMain(Config* config,
     import_manager->DoneQueryDbImport(updated_file.path);
   }

+  status->num_active_threads--;
+
   return did_work;
 }


@@ -43,5 +43,6 @@ void Indexer_Main(Config* config,
 bool QueryDb_ImportMain(Config* config,
                         QueryDatabase* db,
                         ImportManager* import_manager,
+                        ImportPipelineStatus* status,
                         SemanticHighlightSymbolCache* semantic_cache,
                         WorkingFiles* working_files);


@@ -1059,6 +1059,7 @@ struct Out_Progress : public lsOutMessage<Out_Progress> {
     int loadPreviousIndexCount = 0;
     int onIdMappedCount = 0;
     int onIndexedCount = 0;
+    int activeThreads = 0;
   };
   std::string method = "$cquery/progress";
   Params params;
@@ -1068,7 +1069,8 @@ MAKE_REFLECT_STRUCT(Out_Progress::Params,
                     doIdMapCount,
                     loadPreviousIndexCount,
                     onIdMappedCount,
-                    onIndexedCount);
+                    onIndexedCount,
+                    activeThreads);
 MAKE_REFLECT_STRUCT(Out_Progress, jsonrpc, method, params);

 struct Out_CquerySetInactiveRegion
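With activeThreads now reported, a client can distinguish "queues drained but indexer threads still busy" from "fully idle". A hedged sketch of that check follows; ProgressParams merely mirrors the fields of Out_Progress::Params and is not a cquery type.

// Illustrative client-side helper only; ProgressParams mirrors
// Out_Progress::Params above and is not part of cquery's API.
struct ProgressParams {
  int indexRequestCount = 0;
  int doIdMapCount = 0;
  int loadPreviousIndexCount = 0;
  int onIdMappedCount = 0;
  int onIndexedCount = 0;
  int activeThreads = 0;
};

// The pipeline is idle only when every queue is empty *and* no indexer
// thread is mid-request -- the same condition EmitProgress uses for
// `has_state`.
bool IsIdle(const ProgressParams& p) {
  return p.indexRequestCount == 0 && p.doIdMapCount == 0 &&
         p.loadPreviousIndexCount == 0 && p.onIdMappedCount == 0 &&
         p.onIndexedCount == 0 && p.activeThreads == 0;
}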


@@ -25,8 +25,9 @@ struct CqueryWaitHandler : MessageHandler {
       has_work |= import_pipeline_status->num_active_threads != 0;
       has_work |= import_manager->HasActiveQuerydbImports();
       has_work |= QueueManager::instance()->HasWork();
-      has_work |= QueryDb_ImportMain(config, db, import_manager, semantic_cache,
-                                     working_files);
+      has_work |=
+          QueryDb_ImportMain(config, db, import_manager, import_pipeline_status,
+                             semantic_cache, working_files);

       if (!has_work)
         ++idle_count;
       else


@@ -12,7 +12,6 @@ long long Timer::ElapsedMicroseconds() const {
   std::chrono::time_point<Clock> end = Clock::now();
   long long elapsed = elapsed_;
   if (start_.has_value()) {
-    // TODO: clang-format this file.
     elapsed +=
         std::chrono::duration_cast<std::chrono::microseconds>(end - *start_)
             .count();