Parse now returns a vector of unique_ptrs

Jacob Dufault 2017-04-08 13:00:08 -07:00
parent f0e8d0110c
commit 460a428ced
7 changed files with 162 additions and 87 deletions
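In caller terms the change looks roughly like this (a sketch assembled from the Parse declaration and RunTests hunks below, not code from this commit; path and args stand in for whatever the caller already has):

    // Before: Parse returned a single IndexedFile by value.
    //   IndexedFile db = Parse(path, args);

    // After: Parse returns one heap-allocated index per file it produced
    // (the main file plus, eventually, headers), so callers iterate and can
    // move each entry into queues or caches without copying.
    std::vector<std::unique_ptr<IndexedFile>> indexes = Parse(path, args);
    for (std::unique_ptr<IndexedFile>& index : indexes) {
      // use *index, or std::move(index) to hand off ownership
    }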


@@ -67,10 +67,10 @@ struct Index_OnIndexed {
// TODO: Rename TypedBidiMessageQueue to IpcTransport?
using IpcMessageQueue = TypedBidiMessageQueue<IpcId, BaseIpcMessage>;
using Index_DoIndexQueue = ThreadedQueue<std::unique_ptr<Index_DoIndex>>;
using Index_DoIdMapQueue = ThreadedQueue<std::unique_ptr<Index_DoIdMap>>;
using Index_OnIdMappedQueue = ThreadedQueue<std::unique_ptr<Index_OnIdMapped>>;
using Index_OnIndexedQueue = ThreadedQueue<std::unique_ptr<Index_OnIndexed>>;
using Index_DoIndexQueue = ThreadedQueue<Index_DoIndex>;
using Index_DoIdMapQueue = ThreadedQueue<Index_DoIdMap>;
using Index_OnIdMappedQueue = ThreadedQueue<Index_OnIdMapped>;
using Index_OnIndexedQueue = ThreadedQueue<Index_OnIndexed>;
template<typename TMessage>
void SendMessage(IpcMessageQueue& t, MessageQueue* destination, TMessage& message) {
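Since the queues now store values instead of unique_ptrs, every TryDequeue call site below loses one layer of unwrapping. A small before/after sketch (Process is a hypothetical consumer; ThreadedQueue<T>::TryDequeue is assumed to return optional<T>, as the call sites below use it):

    // Before: the queue held unique_ptrs, so a dequeue unwrapped two layers.
    //   optional<std::unique_ptr<Index_DoIndex>> opt = queue_do_index->TryDequeue();
    //   std::unique_ptr<Index_DoIndex> request = std::move(opt.value());

    // After: the queue holds values directly.
    optional<Index_DoIndex> request = queue_do_index->TryDequeue();
    if (request)
      Process(request->path, request->args);  // Process is a hypothetical consumer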
@@ -291,44 +291,57 @@ void WriteToCache(std::string filename, IndexedFile& file) {
bool IndexMain_DoIndex(Index_DoIndexQueue* queue_do_index,
Index_DoIdMapQueue* queue_do_id_map) {
optional<std::unique_ptr<Index_DoIndex>> opt_index_request = queue_do_index->TryDequeue();
if (!opt_index_request)
optional<Index_DoIndex> index_request = queue_do_index->TryDequeue();
if (!index_request)
return false;
std::unique_ptr<Index_DoIndex> index_request = std::move(opt_index_request.value());
Timer time;
std::unique_ptr<IndexedFile> old_index = LoadCachedFile(index_request->path);
time.ResetAndPrint("Loading cached index");
// If the index update is an import, then we will load the previous index
// into memory if we have a previous index. After that, we dispatch an
// update request to get the latest version.
if (old_index && index_request->type == Index_DoIndex::Type::Import) {
auto response = MakeUnique<Index_DoIdMap>(nullptr /*previous*/, std::move(old_index) /*current*/);
if (index_request->type == Index_DoIndex::Type::Import) {
index_request->type = Index_DoIndex::Type::Update;
std::unique_ptr<IndexedFile> old_index = LoadCachedFile(index_request->path);
time.ResetAndPrint("Loading cached index");
// If import fails just do a standard update.
if (old_index) {
Index_DoIdMap response(nullptr, std::move(old_index));
queue_do_id_map->Enqueue(std::move(response));
index_request->type = Index_DoIndex::Type::Update;
queue_do_index->Enqueue(std::move(index_request));
queue_do_index->Enqueue(std::move(*index_request));
return true;
}
else {
}
// Parse request and send a response.
std::cerr << "Parsing file " << index_request->path << " with args "
<< Join(index_request->args, ", ") << std::endl;
// TODO: parse should return unique_ptr. Then we can eliminate copy below. Make sure to not
// reuse moved pointer in WriteToCache if we do so.
IndexedFile current_index = Parse(index_request->path, index_request->args);
std::vector<std::unique_ptr<IndexedFile>> indexes = Parse(index_request->path, index_request->args);
time.ResetAndPrint("Parsing/indexing");
auto response = MakeUnique<Index_DoIdMap>(std::move(old_index) /*previous*/, MakeUnique<IndexedFile>(current_index) /*current*/);
queue_do_id_map->Enqueue(std::move(response));
for (auto& current_index : indexes) {
std::unique_ptr<IndexedFile> old_index = LoadCachedFile(current_index->path);
time.ResetAndPrint("Loading cached index");
// TODO: Cache to disk on a separate thread. Maybe we do the cache after we
// have imported the index (so the import pipeline has five stages instead
// of the current 4).
// Cache file so we can diff it later.
WriteToCache(index_request->path, current_index);
WriteToCache(index_request->path, *current_index);
time.ResetAndPrint("Cache index update to disk");
// Send response to create id map.
Index_DoIdMap response(std::move(old_index), std::move(current_index));
queue_do_id_map->Enqueue(std::move(response));
}
return true;
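Read as a whole, the new IndexMain_DoIndex body has roughly this shape (a readability paraphrase of the hunk above with the Timer/logging calls omitted, not the literal committed code):

    bool IndexMain_DoIndex(Index_DoIndexQueue* queue_do_index,
                           Index_DoIdMapQueue* queue_do_id_map) {
      optional<Index_DoIndex> index_request = queue_do_index->TryDequeue();
      if (!index_request)
        return false;

      if (index_request->type == Index_DoIndex::Type::Import) {
        // Imports try the on-disk cache first; either way the request becomes an Update.
        index_request->type = Index_DoIndex::Type::Update;
        std::unique_ptr<IndexedFile> old_index = LoadCachedFile(index_request->path);
        if (old_index) {
          // Ship the cached index now and re-enqueue a request for a fresh parse.
          Index_DoIdMap response(nullptr, std::move(old_index));
          queue_do_id_map->Enqueue(std::move(response));
          queue_do_index->Enqueue(std::move(*index_request));
          return true;
        }
        // Cache miss: fall through and parse immediately.
      }

      // Parse can now emit several IndexedFiles (e.g. headers), each of which
      // gets its own cache write and id-map request.
      std::vector<std::unique_ptr<IndexedFile>> indexes =
          Parse(index_request->path, index_request->args);
      for (auto& current_index : indexes) {
        std::unique_ptr<IndexedFile> old_index = LoadCachedFile(current_index->path);
        WriteToCache(index_request->path, *current_index);
        Index_DoIdMap response(std::move(old_index), std::move(current_index));
        queue_do_id_map->Enqueue(std::move(response));
      }
      return true;
    }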
@@ -336,16 +349,15 @@ bool IndexMain_DoIndex(Index_DoIndexQueue* queue_do_index,
bool IndexMain_DoCreateIndexUpdate(Index_OnIdMappedQueue* queue_on_id_mapped,
Index_OnIndexedQueue* queue_on_indexed) {
optional<std::unique_ptr<Index_OnIdMapped>> opt_response = queue_on_id_mapped->TryDequeue();
if (!opt_response)
optional<Index_OnIdMapped> response = queue_on_id_mapped->TryDequeue();
if (!response)
return false;
std::unique_ptr<Index_OnIdMapped> response = std::move(opt_response.value());
Timer time;
IndexUpdate update = IndexUpdate::CreateDelta(response->previous_id_map.get(), response->current_id_map.get(),
response->previous_index.get(), response->current_index.get());
time.ResetAndPrint("Creating delta IndexUpdate");
auto reply = MakeUnique<Index_OnIndexed>(update);
Index_OnIndexed reply(update);
queue_on_indexed->Enqueue(std::move(reply));
time.ResetAndPrint("Sending update to server");
@@ -370,15 +382,9 @@ void IndexMain(Index_DoIndexQueue* queue_do_index,
}
QueryableFile* FindFile(QueryableDatabase* db, const std::string& filename) {
// std::cerr << "Wanted file " << msg->document << std::endl;
// TODO: hashmap lookup.
for (auto& file : db->files) {
// std::cerr << " - Have file " << file.file_id << std::endl;
if (file.def.usr == filename) {
//std::cerr << "Found file " << filename << std::endl;
return &file;
}
}
auto it = db->usr_to_symbol.find(filename);
if (it != db->usr_to_symbol.end())
return &db->files[it->second.idx];
std::cerr << "Unable to find file " << filename << std::endl;
return nullptr;
@@ -635,9 +641,9 @@ void QueryDbMainLoop(
<< "] Dispatching index request for file " << filepath
<< std::endl;
auto request = MakeUnique<Index_DoIndex>(Index_DoIndex::Type::Import);
request->path = filepath;
request->args = entry.args;
Index_DoIndex request(Index_DoIndex::Type::Import);
request.path = filepath;
request.args = entry.args;
queue_do_index->Enqueue(std::move(request));
}
std::cerr << "Done" << std::endl;
@@ -932,29 +938,30 @@ void QueryDbMainLoop(
while (true) {
optional<std::unique_ptr<Index_DoIdMap>> opt_request = queue_do_id_map->TryDequeue();
if (!opt_request)
optional<Index_DoIdMap> request = queue_do_id_map->TryDequeue();
if (!request)
break;
std::unique_ptr<Index_DoIdMap> request = std::move(opt_request.value());
auto response = MakeUnique<Index_OnIdMapped>();
Index_OnIdMapped response;
Timer time;
if (request->previous) {
response->previous_id_map = MakeUnique<IdMap>(db, request->previous->id_cache);
response->previous_index = std::move(request->previous);
response.previous_id_map = MakeUnique<IdMap>(db, request->previous->id_cache);
response.previous_index = std::move(request->previous);
}
response->current_id_map = MakeUnique<IdMap>(db, request->current->id_cache);
response->current_index = std::move(request->current);
assert(request->current);
response.current_id_map = MakeUnique<IdMap>(db, request->current->id_cache);
response.current_index = std::move(request->current);
time.ResetAndPrint("Create IdMap");
queue_on_id_mapped->Enqueue(std::move(response));
}
while (true) {
optional<std::unique_ptr<Index_OnIndexed>> opt_response = queue_on_indexed->TryDequeue();
if (!opt_response)
optional<Index_OnIndexed> response = queue_on_indexed->TryDequeue();
if (!response)
break;
std::unique_ptr<Index_OnIndexed> response = std::move(opt_response.value());
Timer time;
db->ApplyIndexUpdate(&response->update);

src/file_consumer.cc (new file, 24 lines)

@@ -0,0 +1,24 @@
#include "file_consumer.h"
FileConsumer::FileConsumer(SharedState* shared_state) : shared_(shared_state) {}
void FileConsumer::ClearOwnership() {
for (auto& entry : local_)
entry.second = Ownership::DoesNotOwn;
}
bool FileConsumer::DoesOwnFile(const std::string& file) {
// Try to find cached local result.
auto it = local_.find(file);
if (it != local_.end())
return it->second == Ownership::Owns;
// No result in local; we need to query global.
bool did_insert = false;
{
std::lock_guard<std::mutex> lock(shared_->muetx);
did_insert = shared_->files.insert(file).second;
}
local_[file] = did_insert ? Ownership::Owns : Ownership::DoesNotOwn;
return did_insert;
}

src/file_consumer.h (new file, 37 lines)

@@ -0,0 +1,37 @@
#pragma once
#include <mutex>
#include <unordered_set>
#include <unordered_map>
// FileConsumer is used by the indexer. When it encounters a file, it tries to
// take ownership over it. If the indexer has ownership over a file, it will
// produce an index; otherwise, it will emit nothing for the declarations and
// references coming from that file.
//
// The indexer does this because header files do not have their own translation
// units, but we still want to index them.
struct FileConsumer {
struct SharedState {
mutable std::unordered_set<std::string> files;
mutable std::mutex muetx;
};
FileConsumer(SharedState* shared_state);
// Returns true if this instance owns the given |file|. This will also attempt
// to take ownership over |file|.
bool DoesOwnFile(const std::string& file);
// Clear all ownership state.
void ClearOwnership();
private:
enum class Ownership {
Owns,
DoesNotOwn
};
std::unordered_map<std::string, Ownership> local_;
SharedState* shared_;
};
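A minimal usage sketch for the new class (the indexing function and file list here are hypothetical; only FileConsumer and SharedState come from this commit):

    #include <string>
    #include <vector>
    #include "file_consumer.h"

    // One SharedState instance is shared by every indexer thread.
    FileConsumer::SharedState shared_state;

    // Hypothetical per-translation-unit pass run on an indexer thread.
    void IndexTranslationUnit(const std::vector<std::string>& seen_files) {
      FileConsumer consumer(&shared_state);
      for (const std::string& file : seen_files) {
        if (!consumer.DoesOwnFile(file))
          continue;  // another consumer already claimed this file (e.g. a shared header)
        // ... emit declarations and references for |file| ...
      }
    }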


@@ -1263,12 +1263,7 @@ void indexEntityReference(CXClientData client_data,
}
}
IndexedFile Parse(std::string filename,
std::vector<std::string> args,
bool dump_ast) {
// TODO: We are currently emitting too much information for things not in the main file. If we're
// not in the main file, we should only emit references.
std::vector<std::unique_ptr<IndexedFile>> Parse(std::string filename, std::vector<std::string> args, bool dump_ast) {
clang_enableStackTraces();
clang_toggleCrashRecovery(1);
@@ -1300,9 +1295,9 @@ IndexedFile Parse(std::string filename,
*/
};
IndexedFile db(filename);
auto db = MakeUnique<IndexedFile>(filename);
NamespaceHelper ns;
IndexParam param(&db, &ns);
IndexParam param(db.get(), &ns);
std::cerr << "!! [START] Indexing " << filename << std::endl;
clang_indexTranslationUnit(index_action, &param, callbacks, sizeof(callbacks),
@@ -1311,5 +1306,7 @@ IndexedFile Parse(std::string filename,
std::cerr << "!! [END] Indexing " << filename << std::endl;
clang_IndexAction_dispose(index_action);
return db;
std::vector<std::unique_ptr<IndexedFile>> result;
result.emplace_back(std::move(db));
return std::move(result);
}


@@ -448,6 +448,4 @@ struct IndexedFile {
std::string ToString();
};
IndexedFile Parse(std::string filename,
std::vector<std::string> args,
bool dump_ast = false);
std::vector<std::unique_ptr<IndexedFile>> Parse(std::string filename, std::vector<std::string> args, bool dump_ast = false);


@@ -23,10 +23,7 @@ std::vector<CompilationEntry> LoadFromDirectoryListing(const std::string& projec
std::vector<std::string> files = GetFilesInFolder(project_directory, true /*recursive*/, true /*add_folder_to_path*/);
for (const std::string& file : files) {
if (EndsWith(file, ".cc") || EndsWith(file, ".cpp") ||
EndsWith(file, ".c") || EndsWith(file, ".h") ||
EndsWith(file, ".hpp")) {
if (EndsWith(file, ".cc") || EndsWith(file, ".cpp") || EndsWith(file, ".c") || EndsWith(file, ".h")) {
CompilationEntry entry;
entry.filename = NormalizePath(file);
entry.args = args;


@@ -72,20 +72,20 @@ void DiffDocuments(std::string path, rapidjson::Document& expected, rapidjson::D
if (actual_output.size() > len) {
std::cout << "Additional output in actual:" << std::endl;
for (int i = len; i < actual_output.size(); ++i)
for (size_t i = len; i < actual_output.size(); ++i)
std::cout << " " << actual_output[i] << std::endl;
}
if (expected_output.size() > len) {
std::cout << "Additional output in expected:" << std::endl;
for (int i = len; i < expected_output.size(); ++i)
for (size_t i = len; i < expected_output.size(); ++i)
std::cout << " " << expected_output[i] << std::endl;
}
}
void VerifySerializeToFrom(IndexedFile& file) {
std::string expected = file.ToString();
std::string actual = Deserialize("--.cc", Serialize(file)).value().ToString();
void VerifySerializeToFrom(IndexedFile* file) {
std::string expected = file->ToString();
std::string actual = Deserialize("--.cc", Serialize(*file)).value().ToString();
if (expected != actual) {
std::cerr << "Serialization failure" << std::endl;;
assert(false);
@@ -123,7 +123,7 @@ void RunTests() {
// Run test.
std::cout << "[START] " << path << std::endl;
IndexedFile db = Parse(path, {
std::vector<std::unique_ptr<IndexedFile>> dbs = Parse(path, {
"-xc++",
"-std=c++11",
"-IC:/Users/jacob/Desktop/superindex/indexer/third_party/",
@@ -131,8 +131,23 @@ void RunTests() {
"-IC:/Users/jacob/Desktop/superindex/indexer/third_party/rapidjson/include",
"-IC:/Users/jacob/Desktop/superindex/indexer/src"
}, false /*dump_ast*/);
VerifySerializeToFrom(db);
std::string actual_output = db.ToString();
// TODO: Supporting tests for more than just primary indexed file.
// Find primary file.
std::unique_ptr<IndexedFile> db;
for (auto& i : dbs) {
if (i->path == path) {
db = std::move(i);
break;
}
}
// TODO: Always pass IndexedFile by pointer, ie, search and remove all IndexedFile& refs.
// TODO: Rename IndexedFile to IndexFile
VerifySerializeToFrom(db.get());
std::string actual_output = db->ToString();
rapidjson::Document actual;
actual.Parse(actual_output.c_str());