#include "import_pipeline.h"

#include "cache_manager.h"
#include "config.h"
#include "diagnostics_engine.h"
#include "lsp.h"
#include "message_handler.h"
#include "platform.h"
#include "project.h"
#include "query_utils.h"
#include "queue_manager.h"
#include "timer.h"

#include <doctest/doctest.h>
#include <loguru.hpp>

#include <cassert>
#include <chrono>
#include <memory>
#include <mutex>
#include <optional>
#include <string>
#include <unordered_map>
#include <vector>

namespace {

struct Out_Progress : public lsOutMessage<Out_Progress> {
  struct Params {
    int indexRequestCount = 0;
    int loadPreviousIndexCount = 0;
    int onIdMappedCount = 0;
    int onIndexedCount = 0;
    int activeThreads = 0;
  };
  std::string method = "$ccls/progress";
  Params params;
};
MAKE_REFLECT_STRUCT(Out_Progress::Params,
                    indexRequestCount,
                    loadPreviousIndexCount,
                    onIdMappedCount,
                    onIndexedCount,
                    activeThreads);
MAKE_REFLECT_STRUCT(Out_Progress, jsonrpc, method, params);
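
// Illustrative only: assuming lsOutMessage and MAKE_REFLECT_STRUCT serialize
// the members by name, a published notification looks roughly like the
// following (the field values here are made up):
//
//   {"jsonrpc":"2.0","method":"$ccls/progress",
//    "params":{"indexRequestCount":12,"loadPreviousIndexCount":0,
//              "onIdMappedCount":3,"onIndexedCount":1,"activeThreads":4}}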

// Instead of processing messages forever, we only process up to
// |kIterationSize| messages of a type at one time. While the import time
// likely stays the same, this should reduce overall queue lengths, which means
// the user gets a usable index faster.
struct IterationLoop {
  const int kIterationSize = 100;
  int count = 0;

  bool Next() {
    return count++ < kIterationSize;
  }
  void Reset() {
    count = 0;
  }
};
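
// Typical drain-loop usage, as seen in IndexMain_DoCreateIndexUpdate and
// QueryDb_ImportMain below:
//
//   IterationLoop loop;
//   while (loop.Next()) {
//     // Pop and process at most one queued message per iteration.
//   }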

struct IModificationTimestampFetcher {
  virtual ~IModificationTimestampFetcher() = default;
  virtual std::optional<int64_t> LastWriteTime(const std::string& path) = 0;
};
struct RealModificationTimestampFetcher : IModificationTimestampFetcher {
  // IModificationTimestampFetcher:
  std::optional<int64_t> LastWriteTime(const std::string& path) override {
    return ::LastWriteTime(path);
  }
};
struct FakeModificationTimestampFetcher : IModificationTimestampFetcher {
  std::unordered_map<std::string, std::optional<int64_t>> entries;

  // IModificationTimestampFetcher:
  std::optional<int64_t> LastWriteTime(const std::string& path) override {
    auto it = entries.find(path);
    assert(it != entries.end());
    return it->second;
  }
};
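
// Tests seed the fake directly; for example, mirroring the doctest cases at
// the bottom of this file:
//
//   FakeModificationTimestampFetcher fetcher;
//   fetcher.entries["foo.h"] = 5;             // file exists, mtime 5
//   fetcher.entries["bar.h"] = std::nullopt;  // file is missing on disk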

long long GetCurrentTimeInMilliseconds() {
  auto time_since_epoch = Timer::Clock::now().time_since_epoch();
  long long elapsed_milliseconds =
      std::chrono::duration_cast<std::chrono::milliseconds>(time_since_epoch)
          .count();
  return elapsed_milliseconds;
}

struct ActiveThread {
  ActiveThread(ImportPipelineStatus* status)
      : status_(status) {
    if (g_config && g_config->progressReportFrequencyMs < 0)
      return;

    ++status_->num_active_threads;
  }
  ~ActiveThread() {
    if (g_config && g_config->progressReportFrequencyMs < 0)
      return;

    --status_->num_active_threads;
    EmitProgress();
  }

  // Send indexing progress to client if reporting is enabled.
  void EmitProgress() {
    auto* queue = QueueManager::instance();
    Out_Progress out;
    out.params.indexRequestCount = queue->index_request.Size();
    out.params.onIdMappedCount = queue->on_id_mapped.Size();
    out.params.onIndexedCount = queue->on_indexed.Size();
    out.params.activeThreads = status_->num_active_threads;

    // Ignore this progress update if the last update was too recent.
    if (g_config && g_config->progressReportFrequencyMs != 0) {
      // Make sure we output a status update if queue lengths are zero.
      bool all_zero = out.params.indexRequestCount == 0 &&
                      out.params.loadPreviousIndexCount == 0 &&
                      out.params.onIdMappedCount == 0 &&
                      out.params.onIndexedCount == 0 &&
                      out.params.activeThreads == 0;
      if (!all_zero &&
          GetCurrentTimeInMilliseconds() < status_->next_progress_output)
        return;
      status_->next_progress_output =
          GetCurrentTimeInMilliseconds() + g_config->progressReportFrequencyMs;
    }

    QueueManager::WriteStdout(kMethodType_Unknown, out);
  }

  ImportPipelineStatus* status_;
};
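
// ActiveThread is an RAII helper: a worker loop brackets each batch of work
// with one so the active-thread count stays accurate, as in Indexer_Main and
// QueryDb_ImportMain below:
//
//   {
//     ActiveThread active_thread(status);
//     // ... do one batch of pipeline work ...
//   }  // When reporting is enabled, the destructor decrements the count
//      // and emits a progress update.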

enum class ShouldParse { Yes, No, NoSuchFile };

// Checks if |path| needs to be reparsed. This will modify cached state such
// that calling this function twice with the same path may report that the
// file needs a parse the first time but not the second.
//
// |from|: The file which generated the parse request for this file.
ShouldParse FileNeedsParse(
    bool is_interactive,
    TimestampManager* timestamp_manager,
    IModificationTimestampFetcher* modification_timestamp_fetcher,
    const std::shared_ptr<ICacheManager>& cache_manager,
    IndexFile* opt_previous_index,
    const std::string& path,
    const std::vector<std::string>& args,
    const std::optional<std::string>& from) {
  auto unwrap_opt = [](const std::optional<std::string>& opt) -> std::string {
    if (opt)
      return " (via " + *opt + ")";
    return "";
  };

  std::optional<int64_t> modification_timestamp =
      modification_timestamp_fetcher->LastWriteTime(path);

  // Cannot find file.
  if (!modification_timestamp)
    return ShouldParse::NoSuchFile;

  std::optional<int64_t> last_cached_modification =
      timestamp_manager->GetLastCachedModificationTime(cache_manager.get(),
                                                       path);

  // File has been changed.
  if (!last_cached_modification ||
      modification_timestamp != *last_cached_modification) {
    LOG_S(INFO) << "Timestamp has changed for " << path << unwrap_opt(from);
    return ShouldParse::Yes;
  }

  // Command-line arguments changed.
  auto is_file = [](const std::string& arg) {
    return EndsWithAny(arg, {".h", ".c", ".cc", ".cpp", ".hpp", ".m", ".mm"});
  };
  if (opt_previous_index) {
    auto& prev_args = opt_previous_index->args;
    bool same = prev_args.size() == args.size();
    for (size_t i = 0; i < args.size() && same; ++i) {
      same = prev_args[i] == args[i] ||
             (is_file(prev_args[i]) && is_file(args[i]));
    }
    if (!same) {
      LOG_S(INFO) << "Arguments have changed for " << path << unwrap_opt(from);
      return ShouldParse::Yes;
    }
  }

  // File has not changed, do not parse it.
  return ShouldParse::No;
}

enum CacheLoadResult { Parse, DoNotParse };
CacheLoadResult TryLoadFromCache(
    FileConsumerSharedState* file_consumer_shared,
    TimestampManager* timestamp_manager,
    IModificationTimestampFetcher* modification_timestamp_fetcher,
    const std::shared_ptr<ICacheManager>& cache_manager,
    bool is_interactive,
    const Project::Entry& entry,
    const std::string& path_to_index) {
  // Always run this block, even if we are interactive, so we can check
  // dependencies and reset files in |file_consumer_shared|.
  IndexFile* previous_index = cache_manager->TryLoad(path_to_index);
  if (!previous_index)
    return CacheLoadResult::Parse;

  // If none of the dependencies have changed and the index is not
  // interactive (i.e., requested by a file save), skip parsing and just load
  // from cache.

  // Check timestamps and update |file_consumer_shared|.
  ShouldParse path_state = FileNeedsParse(
      is_interactive, timestamp_manager, modification_timestamp_fetcher,
      cache_manager, previous_index, path_to_index, entry.args, std::nullopt);
  if (path_state == ShouldParse::Yes)
    file_consumer_shared->Reset(path_to_index);

  // Target file does not exist on disk, do not emit any indexes.
  // TODO: Dependencies should be reassigned to other files. We can do this by
  // updating the "primary_file" if it doesn't exist. Might not actually be a
  // problem in practice.
  if (path_state == ShouldParse::NoSuchFile)
    return CacheLoadResult::DoNotParse;

  bool needs_reparse = is_interactive || path_state == ShouldParse::Yes;

  for (const std::string& dependency : previous_index->dependencies) {
    assert(!dependency.empty());

    if (FileNeedsParse(is_interactive, timestamp_manager,
                       modification_timestamp_fetcher, cache_manager,
                       previous_index, dependency, entry.args,
                       previous_index->path) == ShouldParse::Yes) {
      needs_reparse = true;

      // Do not break here, as we need to update |file_consumer_shared| for
      // every dependency that needs to be reparsed.
      file_consumer_shared->Reset(dependency);
    }
  }

  // FIXME: should we still load from cache?
  if (needs_reparse)
    return CacheLoadResult::Parse;

  // No timestamps changed - load directly from cache.
  LOG_S(INFO) << "Skipping parse; no timestamp change for " << path_to_index;

  // TODO/FIXME: real perf
  PerformanceImportFile perf;

  std::vector<Index_OnIdMapped> result;
  result.push_back(Index_OnIdMapped(
      cache_manager, nullptr, cache_manager->TryTakeOrLoad(path_to_index),
      perf, is_interactive, false /*write_to_disk*/));
  for (const std::string& dependency : previous_index->dependencies) {
    // Only load a dependency if it is not already loaded.
    //
    // This is important for perf in large projects where there are lots of
    // dependencies shared between many files.
    if (!file_consumer_shared->Mark(dependency))
      continue;

    LOG_S(INFO) << "emit index for " << dependency << " via "
                << previous_index->path;

    // |dependency_index| may be null if there is no cache for it but
    // another file has already started importing it.
    if (std::unique_ptr<IndexFile> dependency_index =
            cache_manager->TryTakeOrLoad(dependency)) {
      result.push_back(
          Index_OnIdMapped(cache_manager, nullptr, std::move(dependency_index),
                           perf, is_interactive, false /*write_to_disk*/));
    }
  }

  QueueManager::instance()->on_id_mapped.EnqueueAll(std::move(result));
  return CacheLoadResult::DoNotParse;
}
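
// In summary, TryLoadFromCache above resolves to:
//   - no cached index for |path_to_index|        -> Parse
//   - target file missing on disk                -> DoNotParse (emit nothing)
//   - interactive request, or the target or a
//     dependency timestamp changed               -> Parse
//   - nothing changed                            -> DoNotParse (the cached
//                                                   indexes are enqueued)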

std::vector<FileContents> PreloadFileContents(
    const std::shared_ptr<ICacheManager>& cache_manager,
    const Project::Entry& entry,
    const std::string& entry_contents,
    const std::string& path_to_index) {
  // Load file contents for all dependencies into memory. If the dependencies
  // for the file changed we may not end up using all of the files we
  // preloaded. If a new dependency was added the indexer will grab the file
  // contents as soon as possible.
  //
  // We do this to minimize the race between indexing a file and capturing the
  // file contents.
  //
  // TODO: We might be able to optimize perf by only copying for files in
  // working_files. We can pass that same set of files to the indexer as
  // well. We then default to a fast file-copy if not in the working set.

  // |index->file_contents| comes from the cache, so we need to check whether
  // that cache is still valid. If it is, we can use it; otherwise we need to
  // load from disk.
  auto get_latest_content = [](const std::string& path, int64_t cached_time,
                               const std::string& cached) -> std::string {
    std::optional<int64_t> mod_time = LastWriteTime(path);
    if (!mod_time)
      return "";

    if (*mod_time == cached_time)
      return cached;

    std::optional<std::string> fresh_content = ReadContent(path);
    if (!fresh_content) {
      LOG_S(ERROR) << "Failed to load content for " << path;
      return "";
    }
    return *fresh_content;
  };

  std::vector<FileContents> file_contents;
  file_contents.push_back(FileContents(entry.filename, entry_contents));
  cache_manager->IterateLoadedCaches([&](IndexFile* index) {
    if (index->path == entry.filename)
      return;
    file_contents.push_back(FileContents(
        index->path,
        get_latest_content(index->path, index->last_modification_time,
                           index->file_contents)));
  });

  return file_contents;
}

void ParseFile(DiagnosticsEngine* diag_engine,
               WorkingFiles* working_files,
               FileConsumerSharedState* file_consumer_shared,
               TimestampManager* timestamp_manager,
               IModificationTimestampFetcher* modification_timestamp_fetcher,
               IIndexer* indexer,
               const Index_Request& request,
               const Project::Entry& entry) {
  // If the file is inferred, we may not actually be able to parse it directly
  // (i.e., it is a header file, which is not listed in the project). In that
  // case, try to use the file which originally imported it.
  std::string path_to_index = entry.filename;
  if (entry.is_inferred) {
    IndexFile* entry_cache = request.cache_manager->TryLoad(entry.filename);
    if (entry_cache)
      path_to_index = entry_cache->import_file;
  }

  // Try to load the file from cache.
  if (TryLoadFromCache(file_consumer_shared, timestamp_manager,
                       modification_timestamp_fetcher, request.cache_manager,
                       request.is_interactive, entry,
                       path_to_index) == CacheLoadResult::DoNotParse) {
    return;
  }

  LOG_S(INFO) << "Parsing " << path_to_index;
  std::vector<FileContents> file_contents = PreloadFileContents(
      request.cache_manager, entry, request.contents, path_to_index);

  std::vector<Index_OnIdMapped> result;
  PerformanceImportFile perf;
  auto indexes = indexer->Index(file_consumer_shared, path_to_index, entry.args,
                                file_contents, &perf);

  if (indexes.empty()) {
    if (g_config->index.enabled && request.id.Valid()) {
      Out_Error out;
      out.id = request.id;
      out.error.code = lsErrorCodes::InternalError;
      out.error.message = "Failed to index " + path_to_index;
      QueueManager::WriteStdout(kMethodType_Unknown, out);
    }
    return;
  }

  for (std::unique_ptr<IndexFile>& new_index : indexes) {
    Timer time;

    // Only emit diagnostics for non-interactive sessions, which makes it
    // easier to identify indexing problems. For interactive sessions,
    // diagnostics are handled by code completion.
    if (!request.is_interactive)
      diag_engine->Publish(working_files, new_index->path,
                           new_index->diagnostics_);

    // When the main thread does the IdMap request, it will request the
    // previous index if needed.
    LOG_S(INFO) << "Emitting index result for " << new_index->path;
    result.push_back(
        Index_OnIdMapped(request.cache_manager,
                         request.cache_manager->TryTakeOrLoad(path_to_index),
                         std::move(new_index), perf, request.is_interactive,
                         true /*write_to_disk*/));
  }

  QueueManager::instance()->on_id_mapped.EnqueueAll(std::move(result),
                                                    request.is_interactive);
}

bool IndexMain_DoParse(
    DiagnosticsEngine* diag_engine,
    WorkingFiles* working_files,
    FileConsumerSharedState* file_consumer_shared,
    TimestampManager* timestamp_manager,
    IModificationTimestampFetcher* modification_timestamp_fetcher,
    IIndexer* indexer) {
  auto* queue = QueueManager::instance();
  std::optional<Index_Request> request = queue->index_request.TryPopFront();
  if (!request)
    return false;

  Project::Entry entry;
  entry.filename = request->path;
  entry.args = request->args;
  ParseFile(diag_engine, working_files, file_consumer_shared,
            timestamp_manager, modification_timestamp_fetcher, indexer,
            request.value(), entry);
  return true;
}

bool IndexMain_DoCreateIndexUpdate(TimestampManager* timestamp_manager) {
  auto* queue = QueueManager::instance();

  bool did_work = false;
  IterationLoop loop;
  while (loop.Next()) {
    std::optional<Index_OnIdMapped> response =
        queue->on_id_mapped.TryPopFront();
    if (!response)
      return did_work;

    did_work = true;

    Timer time;

    // Build delta update.
    IndexUpdate update = IndexUpdate::CreateDelta(response->previous.get(),
                                                  response->current.get());
    response->perf.index_make_delta = time.ElapsedMicrosecondsAndReset();
    LOG_S(INFO) << "Built index update for " << response->current->path
                << " (is_delta=" << !!response->previous << ")";

    // Write current index to disk if requested.
    if (response->write_to_disk) {
      LOG_S(INFO) << "Writing index to disk for " << response->current->path;
      time.Reset();
      response->cache_manager->WriteToCache(*response->current);
      response->perf.index_save_to_disk = time.ElapsedMicrosecondsAndReset();
      timestamp_manager->UpdateCachedModificationTime(
          response->current->path,
          response->current->last_modification_time);
    }

    Index_OnIndexed reply(std::move(update), response->perf);
    queue->on_indexed.PushBack(std::move(reply), response->is_interactive);
  }

  return did_work;
}

} // namespace

std::optional<int64_t> TimestampManager::GetLastCachedModificationTime(
    ICacheManager* cache_manager,
    const std::string& path) {
  {
    std::lock_guard<std::mutex> guard(mutex_);
    auto it = timestamps_.find(path);
    if (it != timestamps_.end())
      return it->second;
  }
  IndexFile* file = cache_manager->TryLoad(path);
  if (!file)
    return std::nullopt;

  UpdateCachedModificationTime(path, file->last_modification_time);
  return file->last_modification_time;
}

void TimestampManager::UpdateCachedModificationTime(const std::string& path,
                                                    int64_t timestamp) {
  std::lock_guard<std::mutex> guard(mutex_);
  timestamps_[path] = timestamp;
}

ImportPipelineStatus::ImportPipelineStatus()
    : num_active_threads(0), next_progress_output(0) {}

// Index a file using an already-parsed translation unit from code completion.
// Since most of the time for indexing a file comes from parsing, we can do
// real-time indexing.
// TODO: add option to disable this.
void IndexWithTuFromCodeCompletion(
    FileConsumerSharedState* file_consumer_shared,
    ClangTranslationUnit* tu,
    const std::vector<CXUnsavedFile>& file_contents,
    const std::string& path,
    const std::vector<std::string>& args) {
  file_consumer_shared->Reset(path);

  PerformanceImportFile perf;
  ClangIndex index;
  auto indexes = ParseWithTu(file_consumer_shared, &perf, tu, &index, path,
                             args, file_contents);
  if (indexes.empty())
    return;

  std::vector<Index_OnIdMapped> result;
  for (std::unique_ptr<IndexFile>& new_index : indexes) {
    Timer time;

    std::shared_ptr<ICacheManager> cache_manager;
    assert(false && "FIXME cache_manager");
    // When the main thread does the IdMap request, it will request the
    // previous index if needed.
    LOG_S(INFO) << "Emitting index for " << new_index->path;
    result.push_back(Index_OnIdMapped(
        cache_manager, cache_manager->TryTakeOrLoad(path), std::move(new_index),
        perf, true /*is_interactive*/, true /*write_to_disk*/));
  }

  LOG_IF_S(WARNING, result.size() > 1)
      << "Code completion index update generated more than one index";

  QueueManager::instance()->on_id_mapped.EnqueueAll(std::move(result));
}
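
// A rough sketch of how work flows through the pipeline queues (all queue and
// function names below are the ones used in this file):
//
//   index_request -> IndexMain_DoParse / ParseFile          -> on_id_mapped
//   on_id_mapped  -> IndexMain_DoCreateIndexUpdate          -> on_indexed
//   on_indexed    -> QueryDb_ImportMain / QueryDb_OnIndexed -> QueryDatabase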

void Indexer_Main(DiagnosticsEngine* diag_engine,
                  FileConsumerSharedState* file_consumer_shared,
                  TimestampManager* timestamp_manager,
                  ImportPipelineStatus* status,
                  Project* project,
                  WorkingFiles* working_files,
                  MultiQueueWaiter* waiter) {
  RealModificationTimestampFetcher modification_timestamp_fetcher;
  auto* queue = QueueManager::instance();
  // Build one index per indexer, as building the index acquires a global lock.
  auto indexer = std::make_unique<ClangIndexer>();

  while (true) {
    bool did_work = false;

    {
      ActiveThread active_thread(status);

      // TODO: process all of IndexMain_DoIndex before calling
      // IndexMain_DoCreateIndexUpdate for better icache behavior. We need to
      // have some threads spinning on both though, otherwise memory usage will
      // get bad.

      // We need to make sure to run both IndexMain_DoParse and
      // IndexMain_DoCreateIndexUpdate so we don't starve querydb of work.
      // Running both also lets the user query the partially constructed index.
      did_work = IndexMain_DoParse(diag_engine, working_files,
                                   file_consumer_shared, timestamp_manager,
                                   &modification_timestamp_fetcher,
                                   indexer.get()) ||
                 did_work;

      did_work = IndexMain_DoCreateIndexUpdate(timestamp_manager) || did_work;
    }

    // We didn't do any work, so wait for a notification.
    if (!did_work) {
      waiter->Wait(&queue->on_indexed, &queue->index_request,
                   &queue->on_id_mapped);
    }
  }
}
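
// Indexer_Main is presumably driven by the process startup code, which is not
// part of this file; a hypothetical launch of one indexer thread could look
// like:
//
//   std::thread([&] {
//     Indexer_Main(&diag_engine, &file_consumer_shared, &timestamp_manager,
//                  &status, &project, &working_files, &indexer_waiter);
//   }).detach();
//
// The objects passed here are stand-ins for whatever the caller owns.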

namespace {
void QueryDb_OnIndexed(QueueManager* queue,
                       QueryDatabase* db,
                       ImportPipelineStatus* status,
                       SemanticHighlightSymbolCache* semantic_cache,
                       WorkingFiles* working_files,
                       Index_OnIndexed* response) {
  Timer time;
  db->ApplyIndexUpdate(&response->update);

  // Update indexed content, inactive lines, and semantic highlighting.
  if (response->update.files_def_update) {
    auto& update = *response->update.files_def_update;
    time.ResetAndPrint("apply index for " + update.value.path);
    WorkingFile* working_file =
        working_files->GetFileByFilename(update.value.path);
    if (working_file) {
      // Update indexed content.
      working_file->SetIndexContent(update.file_content);

      // Inactive lines.
      EmitInactiveLines(working_file, update.value.inactive_regions);

      // Semantic highlighting.
      int file_id =
          db->name2file_id[LowerPathIfInsensitive(working_file->filename)];
      QueryFile* file = &db->files[file_id];
      EmitSemanticHighlighting(db, semantic_cache, working_file, file);
    }
  }
}

} // namespace

bool QueryDb_ImportMain(QueryDatabase* db,
                        ImportPipelineStatus* status,
                        SemanticHighlightSymbolCache* semantic_cache,
                        WorkingFiles* working_files) {
  auto* queue = QueueManager::instance();

  ActiveThread active_thread(status);

  bool did_work = false;

  IterationLoop loop;
  while (loop.Next()) {
    std::optional<Index_OnIndexed> response = queue->on_indexed.TryPopFront();
    if (!response)
      break;
    did_work = true;
    QueryDb_OnIndexed(queue, db, status, semantic_cache, working_files,
                      &*response);
  }

  return did_work;
}

TEST_SUITE("ImportPipeline") {
  struct Fixture {
    Fixture() {
      g_config = std::make_unique<Config>();
      QueueManager::Init(&querydb_waiter, &indexer_waiter, &stdout_waiter);

      queue = QueueManager::instance();
      cache_manager = ICacheManager::MakeFake({});
      indexer = IIndexer::MakeTestIndexer({});
      diag_engine.Init();
    }

    bool PumpOnce() {
      return IndexMain_DoParse(&diag_engine, &working_files,
                               &file_consumer_shared, &timestamp_manager,
                               &modification_timestamp_fetcher, indexer.get());
    }

    void MakeRequest(const std::string& path,
                     const std::vector<std::string>& args = {},
                     bool is_interactive = false,
                     const std::string& contents = "void foo();") {
      queue->index_request.PushBack(
          Index_Request(path, args, is_interactive, contents, cache_manager));
    }

    MultiQueueWaiter querydb_waiter;
    MultiQueueWaiter indexer_waiter;
    MultiQueueWaiter stdout_waiter;

    QueueManager* queue = nullptr;
    DiagnosticsEngine diag_engine;
    WorkingFiles working_files;
    FileConsumerSharedState file_consumer_shared;
    TimestampManager timestamp_manager;
    FakeModificationTimestampFetcher modification_timestamp_fetcher;
    std::shared_ptr<ICacheManager> cache_manager;
    std::unique_ptr<IIndexer> indexer;
  };

  TEST_CASE_FIXTURE(Fixture, "FileNeedsParse") {
    auto check = [&](const std::string& file, bool is_dependency = false,
                     bool is_interactive = false,
                     const std::vector<std::string>& old_args = {},
                     const std::vector<std::string>& new_args = {}) {
      std::unique_ptr<IndexFile> opt_previous_index;
      if (!old_args.empty()) {
        opt_previous_index = std::make_unique<IndexFile>("---.cc", "<empty>");
        opt_previous_index->args = old_args;
      }
      std::optional<std::string> from;
      if (is_dependency)
        from = std::string("---.cc");
      return FileNeedsParse(is_interactive /*is_interactive*/,
                            &timestamp_manager,
                            &modification_timestamp_fetcher, cache_manager,
                            opt_previous_index.get(), file, new_args, from);
    };

    // A file with no timestamp is not imported, since this implies the file no
    // longer exists on disk.
    modification_timestamp_fetcher.entries["bar.h"] = std::nullopt;
    REQUIRE(check("bar.h", false /*is_dependency*/) == ShouldParse::NoSuchFile);

    // A dependency is only imported once.
    modification_timestamp_fetcher.entries["foo.h"] = 5;
    REQUIRE(check("foo.h", true /*is_dependency*/) == ShouldParse::Yes);
    REQUIRE(check("foo.h", true /*is_dependency*/) == ShouldParse::No);

    // An interactive dependency is imported.
    REQUIRE(check("foo.h", true /*is_dependency*/) == ShouldParse::No);
    REQUIRE(check("foo.h", true /*is_dependency*/, true /*is_interactive*/) ==
            ShouldParse::Yes);

    // A file whose timestamp has not changed is not imported. When the
    // timestamp changes (either forward or backward) it is reimported.
    auto check_timestamp_change = [&](int64_t timestamp) {
      modification_timestamp_fetcher.entries["aa.cc"] = timestamp;
      REQUIRE(check("aa.cc") == ShouldParse::Yes);
      REQUIRE(check("aa.cc") == ShouldParse::Yes);
      REQUIRE(check("aa.cc") == ShouldParse::Yes);
      timestamp_manager.UpdateCachedModificationTime("aa.cc", timestamp);
      REQUIRE(check("aa.cc") == ShouldParse::No);
    };
    check_timestamp_change(5);
    check_timestamp_change(6);
    check_timestamp_change(5);
    check_timestamp_change(4);

    // Argument change implies reimport, even if timestamp has not changed.
    timestamp_manager.UpdateCachedModificationTime("aa.cc", 5);
    modification_timestamp_fetcher.entries["aa.cc"] = 5;
    REQUIRE(check("aa.cc", false /*is_dependency*/, false /*is_interactive*/,
                  {"b"} /*old_args*/,
                  {"b", "a"} /*new_args*/) == ShouldParse::Yes);
  }

  // FIXME: validate other state like timestamp_manager, etc.
  // FIXME: add more interesting tests that are not the happy path
  // FIXME: test
  // - IndexMain_DoCreateIndexUpdate
  // - IndexMain_LoadPreviousIndex
  // - QueryDb_ImportMain

  TEST_CASE_FIXTURE(Fixture, "index request with zero results") {
    indexer = IIndexer::MakeTestIndexer({IIndexer::TestEntry{"foo.cc", 0}});

    MakeRequest("foo.cc");

    REQUIRE(queue->index_request.Size() == 1);
    REQUIRE(queue->on_id_mapped.Size() == 0);
    PumpOnce();
    REQUIRE(queue->index_request.Size() == 0);
    REQUIRE(queue->on_id_mapped.Size() == 0);

    REQUIRE(file_consumer_shared.used_files.empty());
  }

  TEST_CASE_FIXTURE(Fixture, "one index request") {
    indexer = IIndexer::MakeTestIndexer({IIndexer::TestEntry{"foo.cc", 100}});

    MakeRequest("foo.cc");

    REQUIRE(queue->index_request.Size() == 1);
    REQUIRE(queue->on_id_mapped.Size() == 0);
    PumpOnce();
    REQUIRE(queue->index_request.Size() == 0);
    REQUIRE(queue->on_id_mapped.Size() == 100);

    REQUIRE(file_consumer_shared.used_files.empty());
  }

  TEST_CASE_FIXTURE(Fixture, "multiple index requests") {
    indexer = IIndexer::MakeTestIndexer(
        {IIndexer::TestEntry{"foo.cc", 100}, IIndexer::TestEntry{"bar.cc", 5}});

    MakeRequest("foo.cc");
    MakeRequest("bar.cc");

    REQUIRE(queue->index_request.Size() == 2);
    // REQUIRE(queue->do_id_map.Size() == 0);
    while (PumpOnce()) {
    }
    REQUIRE(queue->index_request.Size() == 0);
    // REQUIRE(queue->do_id_map.Size() == 105);

    REQUIRE(file_consumer_shared.used_files.empty());
  }
}