wip
parent 56135f615c
commit 243630ca2e
@@ -94,7 +94,6 @@ REGISTER_IPC_MESSAGE(Ipc_CancelRequest);

 bool QueryDbMainLoop(Config* config,
                      QueryDatabase* db,
-                     bool* exit_when_idle,
                      MultiQueueWaiter* waiter,
                      Project* project,
                      FileConsumer::SharedState* file_consumer_shared,
@@ -142,7 +141,6 @@ bool QueryDbMainLoop(Config* config,
 void RunQueryDbThread(const std::string& bin_name,
                       Config* config,
                       MultiQueueWaiter* waiter) {
-  bool exit_when_idle = false;
   Project project;
   SemanticHighlightSymbolCache semantic_cache;
   WorkingFiles working_files;
@@ -161,6 +159,7 @@ void RunQueryDbThread(const std::string& bin_name,
   auto non_global_code_complete_cache = MakeUnique<CodeCompleteCache>();
   auto signature_cache = MakeUnique<CodeCompleteCache>();
   ImportManager import_manager;
+  ImportPipelineStatus import_pipeline_status;
   TimestampManager timestamp_manager;
   QueryDatabase db;

@@ -168,11 +167,11 @@ void RunQueryDbThread(const std::string& bin_name,
   for (MessageHandler* handler : *MessageHandler::message_handlers) {
     handler->config = config;
     handler->db = &db;
-    handler->exit_when_idle = &exit_when_idle;
     handler->waiter = waiter;
     handler->project = &project;
     handler->file_consumer_shared = &file_consumer_shared;
     handler->import_manager = &import_manager;
+    handler->import_pipeline_status = &import_pipeline_status;
     handler->timestamp_manager = &timestamp_manager;
     handler->semantic_cache = &semantic_cache;
     handler->working_files = &working_files;
@@ -188,17 +187,11 @@ void RunQueryDbThread(const std::string& bin_name,
   SetCurrentThreadName("querydb");
   while (true) {
     bool did_work = QueryDbMainLoop(
-        config, &db, &exit_when_idle, waiter, &project, &file_consumer_shared,
+        config, &db, waiter, &project, &file_consumer_shared,
         &import_manager, &timestamp_manager, &semantic_cache, &working_files,
         &clang_complete, &include_complete, global_code_complete_cache.get(),
         non_global_code_complete_cache.get(), signature_cache.get());

-    // No more work left and exit request. Exit.
-    if (!did_work && exit_when_idle && WorkThread::num_active_threads == 0) {
-      LOG_S(INFO) << "Exiting; exit_when_idle is set and there is no more work";
-      exit(0);
-    }
-
     // Cleanup and free any unused memory.
     FreeUnusedMemory();

@@ -249,9 +242,12 @@ void LaunchStdinLoop(Config* config,
     if (!message)
       return WorkThread::Result::MoreWork;

+    // Cache |method_id| so we can access it after moving |message|.
+    IpcId method_id = message->method_id;
+
     (*request_times)[message->method_id] = Timer();

-    switch (message->method_id) {
+    switch (method_id) {
       case IpcId::Initialized: {
         // TODO: don't send output until we get this notification
         break;
@@ -262,21 +258,7 @@ void LaunchStdinLoop(Config* config,
         break;
       }

-      case IpcId::Exit: {
-        LOG_S(INFO) << "Exiting";
-        exit(0);
-        break;
-      }
-
-      case IpcId::CqueryExitWhenIdle: {
-        // querydb needs to know to exit when idle. We return out of the stdin
-        // loop to exit the thread. If we keep parsing input stdin is likely
-        // closed so cquery will exit.
-        LOG_S(INFO) << "cquery will exit when all threads are idle";
-        queue->for_querydb.Enqueue(std::move(message));
-        return WorkThread::Result::ExitThread;
-      }
-
+      case IpcId::Exit:
       case IpcId::Initialize:
       case IpcId::TextDocumentDidOpen:
       case IpcId::CqueryTextDocumentDidView:
@@ -304,18 +286,23 @@ void LaunchStdinLoop(Config* config,
       case IpcId::CqueryBase:
       case IpcId::CqueryDerived:
       case IpcId::CqueryIndexFile:
-      case IpcId::CqueryQueryDbWaitForIdleIndexer: {
+      case IpcId::CqueryWait: {
        queue->for_querydb.Enqueue(std::move(message));
        break;
      }

      default: {
        LOG_S(ERROR) << "Unhandled IPC message "
-                     << IpcIdToString(message->method_id);
+                     << IpcIdToString(method_id);
        exit(1);
      }
    }

+    // If the message was to exit then querydb will take care of the actual
+    // exit. Stop reading from stdin since it might be detached.
+    if (method_id == IpcId::Exit)
+      return WorkThread::Result::ExitThread;
+
    return WorkThread::Result::MoreWork;
  });
 }
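Note: the cached |method_id| introduced above exists because |message| is handed off with std::move() into the querydb queue, after which the post-switch Exit check can no longer safely read message->method_id. A standalone sketch of the hazard and the fix, using stand-in types rather than cquery's own:

#include <cassert>
#include <memory>
#include <utility>

enum class IpcId { Exit, Initialized };

struct BaseIpcMessage {
  IpcId method_id = IpcId::Exit;
};

int main() {
  auto message = std::make_unique<BaseIpcMessage>();

  // Cache the id before handing the message off, mirroring the diff.
  IpcId method_id = message->method_id;

  // Stand-in for queue->for_querydb.Enqueue(std::move(message)): |message|
  // is null afterwards, so dereferencing it would be a bug.
  std::unique_ptr<BaseIpcMessage> queued = std::move(message);
  assert(queued != nullptr && message == nullptr);

  // Safe: the check uses the cached copy, not the moved-from pointer.
  return method_id == IpcId::Exit ? 0 : 1;
}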
@@ -3,6 +3,7 @@
 #include "config.h"
 #include "file_consumer.h"
 #include "import_manager.h"
+#include "import_pipeline.h"
 #include "queue_manager.h"
 #include "project.h"
 #include "semantic_highlight_symbol_cache.h"
@@ -23,6 +24,7 @@ WorkThread::Result IndexMain(Config* config,
                              FileConsumer::SharedState* file_consumer_shared,
                              TimestampManager* timestamp_manager,
                              ImportManager* import_manager,
+                             ImportPipelineStatus* status,
                              Project* project,
                              WorkingFiles* working_files,
                              MultiQueueWaiter* waiter);
@@ -78,6 +78,7 @@ std::vector<Index_DoIdMap> DoParseFile(
   // Always run this block, even if we are interactive, so we can check
   // dependencies and reset files in |file_consumer_shared|.
   IndexFile* previous_index = cache_loader->TryLoad(path);
+  LOG_S(ERROR) << "!! DoParseFile " << path << ", previous_index=" << previous_index;
   if (previous_index) {
     // If none of the dependencies have changed and the index is not
     // interactive (ie, requested by a file save), skip parsing and just load
@@ -312,12 +313,14 @@ bool IndexMain_DoParse(Config* config,
   if (!request)
     return false;

+  LOG_S(INFO) << "IndexMain_DoParse request->path=" << request->path;
   Project::Entry entry;
   entry.filename = request->path;
   entry.args = request->args;
   std::vector<Index_DoIdMap> responses = ParseFile(
       config, working_files, index, file_consumer_shared, timestamp_manager,
       import_manager, request->is_interactive, entry, request->contents);
+  LOG_S(INFO) << "IndexMain_DoParse request->path=" << request->path << " responses.size()=" << responses.size();

   // Don't bother sending an IdMap request if there are no responses.
   if (responses.empty())
@@ -438,9 +441,12 @@ WorkThread::Result IndexMain(Config* config,
                              FileConsumer::SharedState* file_consumer_shared,
                              TimestampManager* timestamp_manager,
                              ImportManager* import_manager,
+                             ImportPipelineStatus* status,
                              Project* project,
                              WorkingFiles* working_files,
                              MultiQueueWaiter* waiter) {
+  status->num_active_threads++;
+
   EmitProgress(config);

   // Build one index per-indexer, as building the index acquires a global lock.
@@ -469,6 +475,8 @@ WorkThread::Result IndexMain(Config* config,
   if (!did_parse && !did_create_update && !did_load_previous)
     did_merge = IndexMergeIndexUpdates();

+  status->num_active_threads--;
+
   auto* queue = QueueManager::instance();

   // We didn't do any work, so wait for a notification.
@@ -2,6 +2,7 @@

 #include "file_consumer.h"

+#include <atomic>
 #include <string>
 #include <vector>

@@ -12,6 +13,10 @@ struct QueryDatabase;
 struct SemanticHighlightSymbolCache;
 struct WorkingFiles;

+struct ImportPipelineStatus {
+  std::atomic<int> num_active_threads;
+};
+
 void IndexWithTuFromCodeCompletion(
     FileConsumer::SharedState* file_consumer_shared,
     ClangTranslationUnit* tu,
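Note: ImportPipelineStatus::num_active_threads takes over the role of the WorkThread counters removed later in this commit: each indexer marks itself active while it works, and the $cquery/wait handler polls the counter. A self-contained sketch of that pattern, where only num_active_threads comes from the diff and everything else is illustrative scaffolding rather than cquery code:

#include <atomic>
#include <chrono>
#include <iostream>
#include <thread>
#include <vector>

struct ImportPipelineStatus {
  std::atomic<int> num_active_threads{0};
};

// A worker does one unit of work, then marks itself idle (compare the
// status->num_active_threads-- added to IndexMain).
void Worker(ImportPipelineStatus* status, int work_ms) {
  std::this_thread::sleep_for(std::chrono::milliseconds(work_ms));
  status->num_active_threads--;
}

// The waiter polls until no worker is active (compare the has_work check
// added to the wait handler).
void WaitForIdle(ImportPipelineStatus* status) {
  while (status->num_active_threads != 0)
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
}

int main() {
  ImportPipelineStatus status;
  std::vector<std::thread> workers;
  for (int i = 0; i < 4; ++i) {
    // Count the worker as active before it starts so the waiter can never
    // observe a spurious zero between spawn and the worker's first statement.
    status.num_active_threads++;
    workers.emplace_back(Worker, &status, 10 * (i + 1));
  }
  WaitForIdle(&status);
  for (std::thread& t : workers)
    t.join();
  std::cout << "all indexers idle\n";
}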
@@ -78,10 +78,8 @@ const char* IpcIdToString(IpcId id) {

     case IpcId::CqueryIndexFile:
       return "$cquery/indexFile";
-    case IpcId::CqueryQueryDbWaitForIdleIndexer:
-      return "$cquery/queryDbWaitForIdleIndexer";
-    case IpcId::CqueryExitWhenIdle:
-      return "$cquery/exitWhenIdle";
+    case IpcId::CqueryWait:
+      return "$cquery/wait";

     default:
       assert(false && "missing IpcId string name");
@@ -54,10 +54,8 @@ enum class IpcId : int {

   // Index the given file contents. Used in tests.
   CqueryIndexFile,
-  // Make querydb wait for the indexer to be idle. Used in tests.
-  CqueryQueryDbWaitForIdleIndexer,
-  // Exit after all messages have been read/processes. Used in tests.
-  CqueryExitWhenIdle
+  // Wait until all cquery threads are idle. Used in tests.
+  CqueryWait,
 };
 MAKE_ENUM_HASHABLE(IpcId)
 MAKE_REFLECT_TYPE_PROXY(IpcId, int)
@@ -38,6 +38,11 @@ bool FindFileOrFail(QueryDatabase* db,
   *out_file_id = QueryFileId((size_t)-1);

+  LOG_S(INFO) << "Unable to find file \"" << absolute_path << "\"";
+  LOG_S(INFO) << "Files (size=" << db->usr_to_file.size() << "): "
+              << StringJoinMap(db->usr_to_file,
+                               [](const std::pair<Usr, QueryFileId>& entry) {
+                                 return entry.first;
+                               });
+
   if (id) {
     Out_Error out;
@@ -5,6 +5,7 @@
 #include "code_complete_cache.h"
 #include "config.h"
 #include "import_manager.h"
+#include "import_pipeline.h"
 #include "include_complete.h"
 #include "queue_manager.h"
 #include "project.h"
@@ -30,11 +31,11 @@
 struct MessageHandler {
   Config* config = nullptr;
   QueryDatabase* db = nullptr;
-  bool* exit_when_idle = nullptr;
   MultiQueueWaiter* waiter = nullptr;
   Project* project = nullptr;
   FileConsumer::SharedState* file_consumer_shared = nullptr;
   ImportManager* import_manager = nullptr;
+  ImportPipelineStatus* import_pipeline_status = nullptr;
   TimestampManager* timestamp_manager = nullptr;
   SemanticHighlightSymbolCache* semantic_cache = nullptr;
   WorkingFiles* working_files = nullptr;
@@ -1,21 +0,0 @@
-#include "entry_points.h"
-#include "message_handler.h"
-
-#include <loguru.hpp>
-
-namespace {
-struct Ipc_CqueryExitWhenIdle : public IpcMessage<Ipc_CqueryExitWhenIdle> {
-  static constexpr IpcId kIpcId = IpcId::CqueryExitWhenIdle;
-};
-MAKE_REFLECT_EMPTY_STRUCT(Ipc_CqueryExitWhenIdle);
-REGISTER_IPC_MESSAGE(Ipc_CqueryExitWhenIdle);
-
-struct CqueryExitWhenIdleHandler : MessageHandler {
-  IpcId GetId() const override { return IpcId::CqueryExitWhenIdle; }
-  void Run(std::unique_ptr<BaseIpcMessage> request) override {
-    *exit_when_idle = true;
-    WorkThread::request_exit_on_idle = true;
-  }
-};
-REGISTER_MESSAGE_HANDLER(CqueryExitWhenIdleHandler);
-}  // namespace
@@ -1,6 +1,8 @@
 #include "message_handler.h"
+#include "platform.h"

+#include <loguru/loguru.hpp>

 namespace {
 struct Ipc_CqueryIndexFile : public IpcMessage<Ipc_CqueryIndexFile> {
   static constexpr IpcId kIpcId = IpcId::CqueryIndexFile;
@@ -23,6 +25,7 @@ REGISTER_IPC_MESSAGE(Ipc_CqueryIndexFile);

 struct CqueryIndexFileHandler : BaseMessageHandler<Ipc_CqueryIndexFile> {
   void Run(Ipc_CqueryIndexFile* request) override {
+    LOG_S(INFO) << "Indexing file " << request->params.path;
     QueueManager::instance()->index_request.Enqueue(Index_Request(
         NormalizePath(request->params.path), request->params.args,
         request->params.is_interactive, request->params.contents));
@@ -4,22 +4,26 @@
 #include <loguru.hpp>

 namespace {
-struct Ipc_CqueryQueryDbWaitForIdleIndexer
-    : public IpcMessage<Ipc_CqueryQueryDbWaitForIdleIndexer> {
-  static constexpr IpcId kIpcId = IpcId::CqueryQueryDbWaitForIdleIndexer;
+struct Ipc_CqueryWait
+    : public IpcMessage<Ipc_CqueryWait> {
+  static constexpr IpcId kIpcId = IpcId::CqueryWait;
 };
-MAKE_REFLECT_EMPTY_STRUCT(Ipc_CqueryQueryDbWaitForIdleIndexer);
-REGISTER_IPC_MESSAGE(Ipc_CqueryQueryDbWaitForIdleIndexer);
+MAKE_REFLECT_EMPTY_STRUCT(Ipc_CqueryWait);
+REGISTER_IPC_MESSAGE(Ipc_CqueryWait);

-struct CqueryQueryDbWaitForIdleIndexerHandler : MessageHandler {
+struct CqueryWaitHandler : MessageHandler {
   IpcId GetId() const override {
-    return IpcId::CqueryQueryDbWaitForIdleIndexer;
+    return IpcId::CqueryWait;
   }
   void Run(std::unique_ptr<BaseIpcMessage> request) override {
     // TODO: use status message system here, then run querydb as normal? Maybe
     // this cannot be a normal message, ie, it needs to be re-entrant.

+    LOG_S(INFO) << "Waiting for idle";
     int idle_count = 0;
     while (true) {
       bool has_work = false;
+      has_work |= import_pipeline_status->num_active_threads != 0;
       has_work |= import_manager->HasActiveQuerydbImports();
       has_work |= QueueManager::instance()->HasWork();
       has_work |= QueryDb_ImportMain(config, db, import_manager, semantic_cache,
@@ -37,5 +41,5 @@ struct CqueryQueryDbWaitForIdleIndexerHandler : MessageHandler {
     LOG_S(INFO) << "Done waiting for idle";
   }
 };
-REGISTER_MESSAGE_HANDLER(CqueryQueryDbWaitForIdleIndexerHandler);
+REGISTER_MESSAGE_HANDLER(CqueryWaitHandler);
 }  // namespace
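Note: the handler's idle_count hints that the idle check is debounced: idleness is only declared after several consecutive polls see no work, so a short gap between pipeline stages is not mistaken for completion. A rough, self-contained sketch of that idea under that assumption (illustrative names, not cquery's API):

#include <chrono>
#include <functional>
#include <thread>

// Declare idle only after |required_idle_polls| consecutive polls report no
// work; any sign of work resets the count.
void WaitUntilIdle(const std::function<bool()>& has_work,
                   int required_idle_polls = 10) {
  int idle_count = 0;
  while (idle_count < required_idle_polls) {
    idle_count = has_work() ? 0 : idle_count + 1;
    std::this_thread::sleep_for(std::chrono::milliseconds(10));
  }
}

int main() {
  // Toy workload: pretend there is work for the first five polls.
  int pending = 5;
  WaitUntilIdle([&] { return pending-- > 0; });
}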
@@ -174,7 +174,8 @@ struct InitializeHandler : BaseMessageHandler<Ipc_InitializeRequest> {
     for (int i = 0; i < config->indexerCount; ++i) {
       WorkThread::StartThread("indexer" + std::to_string(i), [=]() {
         return IndexMain(config, file_consumer_shared, timestamp_manager,
-                         import_manager, project, working_files, waiter);
+                         import_manager, import_pipeline_status, project,
+                         working_files, waiter);
       });
     }
@@ -1,8 +1,10 @@
 #pragma once

-#include <optional.h>
 #include "utils.h"
+#include "work_thread.h"
+
+#include <optional.h>

 #include <algorithm>
 #include <atomic>
 #include <condition_variable>
@@ -43,11 +45,8 @@ struct MultiQueueWaiter {
     // HasState() is called data gets posted but before we begin waiting for
     // the condition variable, we will miss the notification. The timeout of 5
     // means that if this happens we will delay operation for 5 seconds.
-    //
-    // If we're trying to exit (WorkThread::request_exit_on_idle), do not
-    // bother waiting.

-    while (!HasState(queues) && !WorkThread::request_exit_on_idle) {
+    while (!HasState(queues)) {
       std::unique_lock<std::mutex> l(m);
       cv.wait_for(l, std::chrono::seconds(5));
     }
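Note: the comment above describes a classic missed-notification window: if a producer enqueues after HasState() returns false but before cv.wait_for() begins, the notify can be lost, and the bounded 5-second wait is what bails the loop out. A minimal sketch of that bounded-wait mitigation with stand-in types (not cquery's queue classes):

#include <chrono>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <queue>
#include <thread>

std::mutex m;
std::condition_variable cv;
std::queue<int> work;  // stands in for the set of threaded queues

bool HasState() {
  std::lock_guard<std::mutex> lock(m);
  return !work.empty();
}

int main() {
  std::thread producer([] {
    std::this_thread::sleep_for(std::chrono::milliseconds(50));
    {
      std::lock_guard<std::mutex> lock(m);
      work.push(1);
    }
    cv.notify_all();  // may fire before the consumer is actually waiting
  });

  // Consumer: even if the notify above raced past us, the bounded wait
  // guarantees HasState() is re-checked; worst case we lose the timeout.
  while (!HasState()) {
    std::unique_lock<std::mutex> lock(m);
    cv.wait_for(lock, std::chrono::seconds(5));
  }
  std::cout << "got work\n";
  producer.join();
}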
@@ -2,26 +2,17 @@

 #include "platform.h"

-std::atomic<int> WorkThread::num_active_threads;
-std::atomic<bool> WorkThread::request_exit_on_idle;
-
 // static
 void WorkThread::StartThread(const std::string& thread_name,
                              const std::function<Result()>& entry_point) {
   new std::thread([thread_name, entry_point]() {
     SetCurrentThreadName(thread_name);

-    ++num_active_threads;
-
     // Main loop.
     while (true) {
       Result result = entry_point();
       if (result == Result::ExitThread)
         break;
-      if (request_exit_on_idle && result == Result::NoWork)
-        break;
     }
-
-    --num_active_threads;
   });
 }
@@ -9,13 +9,9 @@
 // Helper methods for starting threads that do some work. Enables test code to
 // wait for all work to complete.
 struct WorkThread {
+  // FIXME: remove result, have entry_point run a while(true) loop.
   enum class Result { MoreWork, NoWork, ExitThread };

-  // The number of active worker threads.
-  static std::atomic<int> num_active_threads;
-  // Set to true to request all work thread instances to exit.
-  static std::atomic<bool> request_exit_on_idle;
-
   // Launch a new thread. |entry_point| will be called continously. It should
   // return true if it there is still known work to be done.
   static void StartThread(const std::string& thread_name,
test_runner_e2e.py · 109 · Normal file → Executable file
@@ -1,22 +1,29 @@
 #!/usr/bin/python

 import json
 import re
 import shlex
 import shutil
 from subprocess import Popen, PIPE


-CQUERY_PATH = 'x64/Debug/cquery.exe'
+# FIXME: instead of $cquery/exitWhenIdle, just send $cquery/wait and the normal
+# lsp exit. This requires renaming $cquery/queryDbWaitForIdle to just
+# $cquery/wait.
+
+CQUERY_PATH = 'build/asan/bin/cquery'
 CACHE_DIR = 'e2e_CACHE'

 # Content-Length: ...\r\n
 # \r\n
 # {
 #   "jsonrpc": "2.0",
 #   "id": 1,
 #   "method": "textDocument/didOpen",
 #   "params": {
 #     ...
 #   }
 # }

 # We write test files in python. The test runner collects all python files in
 # the directory and executes them. The test function just creates a test object
|
||||
# Test functions are automatically discovered; they just need to be in the
|
||||
# global environment and start with `Test_`.
|
||||
|
||||
|
||||
class TestBuilder:
|
||||
def __init__(self):
|
||||
self.sent = []
|
||||
@ -32,7 +40,7 @@ class TestBuilder:
|
||||
|
||||
def IndexFile(self, path, contents):
|
||||
"""
|
||||
Writes the file contents to disk so that the language server can access it.
|
||||
Indexes the given file with contents.
|
||||
"""
|
||||
self.Send({
|
||||
'method': '$cquery/indexFile',
|
||||
@ -41,9 +49,7 @@ class TestBuilder:
|
||||
'contents': contents,
|
||||
'args': [
|
||||
'-xc++',
|
||||
'-std=c++11',
|
||||
'-isystemC:/Program Files (x86)/Microsoft Visual Studio/2017/Community/VC/Tools/MSVC/14.10.25017/include',
|
||||
'-isystemC:/Program Files (x86)/Windows Kits/10/Include/10.0.15063.0/ucrt'
|
||||
'-std=c++11'
|
||||
]
|
||||
}
|
||||
})
|
||||
@ -51,9 +57,9 @@ class TestBuilder:
|
||||
|
||||
def WaitForIdle(self):
|
||||
"""
|
||||
Blocks the querydb thread until any active imports are complete.
|
||||
cquery will pause processing messages until it is idle.
|
||||
"""
|
||||
self.Send({'method': '$cquery/queryDbWaitForIdleIndexer'})
|
||||
self.Send({'method': '$cquery/wait'})
|
||||
return self
|
||||
|
||||
def Send(self, stdin):
|
||||
@ -85,8 +91,7 @@ class TestBuilder:
|
||||
'capabilities': {},
|
||||
'trace': 'off',
|
||||
'initializationOptions': {
|
||||
'cacheDirectory': CACHE_DIR,
|
||||
'clientVersion': -1 # Disables the check
|
||||
'cacheDirectory': CACHE_DIR
|
||||
}
|
||||
}
|
||||
})
|
||||
@ -98,10 +103,10 @@ class TestBuilder:
|
||||
'hoverProvider': True,
|
||||
'completionProvider': {
|
||||
'resolveProvider': False,
|
||||
'triggerCharacters': [ '.', ':', '>', '#' ]
|
||||
'triggerCharacters': ['.', ':', '>', '#']
|
||||
},
|
||||
'signatureHelpProvider': {
|
||||
'triggerCharacters': [ '(', ',' ]
|
||||
'triggerCharacters': ['(', ',']
|
||||
},
|
||||
'definitionProvider': True,
|
||||
'referencesProvider': True,
|
||||
@ -123,18 +128,24 @@ class TestBuilder:
|
||||
})
|
||||
return self
|
||||
|
||||
|
||||
def _ExecuteTest(name, func):
|
||||
"""
|
||||
Executes a specific test.
|
||||
|
||||
|func| must return a TestBuilder object.
|
||||
"""
|
||||
|
||||
# Delete cache directory.
|
||||
shutil.rmtree(CACHE_DIR, ignore_errors=True)
|
||||
|
||||
test_builder = func()
|
||||
if not isinstance(test_builder, TestBuilder):
|
||||
raise Exception('%s does not return a TestBuilder instance' % name)
|
||||
|
||||
# Add a final exit message.
|
||||
test_builder.Send({ 'method': '$cquery/exitWhenIdle' })
|
||||
test_builder.Send({'method': '$cquery/wait'})
|
||||
test_builder.Send({'method': 'exit'})
|
||||
|
||||
# Convert messages to a stdin byte array.
|
||||
stdin = ''
|
||||
@ -151,7 +162,15 @@ def _ExecuteTest(name, func):
|
||||
start = match.span()[1]
|
||||
length = int(match.groups()[0])
|
||||
message = string[start:start + length]
|
||||
messages.append(json.loads(message))
|
||||
decoded = json.loads(message)
|
||||
# Do not report '$cquery/progress' messages.
|
||||
if 'method' in decoded and decoded['method'] == '$cquery/progress':
|
||||
continue
|
||||
# Do not report 'textDocument/publishDiagnostic' messages.
|
||||
if 'method' in decoded and decoded['method'] == 'textDocument/publishDiagnostics':
|
||||
continue
|
||||
|
||||
messages.append(decoded)
|
||||
return messages
|
||||
|
||||
# Utility method to print a byte array.
|
||||
@ -160,10 +179,10 @@ def _ExecuteTest(name, func):
|
||||
print(line.decode('utf8'))
|
||||
|
||||
# Execute program.
|
||||
cmd = "%s --language-server" % CQUERY_PATH
|
||||
cmd = "%s --language-server --log-all-to-stderr" % CQUERY_PATH
|
||||
process = Popen(shlex.split(cmd), stdin=PIPE, stdout=PIPE, stderr=PIPE)
|
||||
(stdout, stderr) = process.communicate(stdin_bytes)
|
||||
exit_code = process.wait();
|
||||
exit_code = process.wait()
|
||||
|
||||
# Check if test succeeded.
|
||||
actual = GetMessages(stdout.decode('utf8'))
|
||||
@ -215,6 +234,7 @@ def _DiscoverTests():
|
||||
continue
|
||||
yield (name, value)
|
||||
|
||||
|
||||
def _RunTests():
|
||||
"""
|
||||
Executes all tests.
|
||||
@ -223,16 +243,13 @@ def _RunTests():
|
||||
_ExecuteTest(name, func)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
#### EXAMPLE TESTS ####
|
||||
|
||||
|
||||
class lsSymbolKind:
|
||||
Function = 1
|
||||
|
||||
|
||||
def lsSymbolInfo(name, position, kind):
|
||||
return {
|
||||
'name': name,
|
||||
@ -240,15 +257,16 @@ def lsSymbolInfo(name, position, kind):
|
||||
'kind': kind
|
||||
}
|
||||
|
||||
|
||||
def DISABLED_Test_Init():
|
||||
return (TestBuilder()
|
||||
.SetupCommonInit()
|
||||
)
|
||||
|
||||
|
||||
def Test_Outline():
|
||||
return (TestBuilder()
|
||||
.SetupCommonInit()
|
||||
# .IndexFile("file:///C%3A/Users/jacob/Desktop/cquery/foo.cc",
|
||||
.IndexFile("foo.cc",
|
||||
"""void foobar();""")
|
||||
.WaitForIdle()
|
||||
@ -257,21 +275,30 @@ def Test_Outline():
|
||||
'method': 'textDocument/documentSymbol',
|
||||
'params': {
|
||||
'textDocument': {
|
||||
'uri': 'C:/Users/jacob/Desktop/cquery/foo.cc'
|
||||
'uri': 'foo.cc'
|
||||
}
|
||||
}
|
||||
})
|
||||
# .Expect({
|
||||
# 'jsonrpc': '2.0',
|
||||
# 'id': 1,
|
||||
# 'error': {'code': -32603, 'message': 'Unable to find file '}
|
||||
# }))
|
||||
.Expect({
|
||||
'id': 1,
|
||||
'result': [
|
||||
lsSymbolInfo('void main()', (1, 1), lsSymbolKind.Function)
|
||||
]
|
||||
}))
|
||||
'jsonrpc': '2.0',
|
||||
'id': 1, 'result': [
|
||||
{
|
||||
'containerName': 'void foobar()',
|
||||
'kind': 12,
|
||||
'name': 'foobar',
|
||||
'location': {
|
||||
'range': {
|
||||
'start': {
|
||||
'line': 0,
|
||||
'character': 5},
|
||||
'end': {
|
||||
'line': 0,
|
||||
'character': 11
|
||||
}
|
||||
},
|
||||
'uri': 'file://foo.cc'
|
||||
}
|
||||
}]}))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|