Skip to content

Instantly share code, notes, and snippets.

@dcampos
Created October 28, 2024 01:47
Show Gist options
  • Select an option

  • Save dcampos/fed8c2d72555bb1548331146bc166150 to your computer and use it in GitHub Desktop.

Select an option

Save dcampos/fed8c2d72555bb1548331146bc166150 to your computer and use it in GitHub Desktop.
# Build the embedded-Neovim msgpack-RPC test client.
#   -std=c++17 : nvim.cpp uses fold expressions (requires C++17)
#   -pthread   : std::thread / boost.asio need the pthread runtime
all:
	g++ nvim.cpp -o nvim_test -std=c++17 -I msgpack/include/ -pthread -lboost_filesystem -lboost_system
#include <boost/process/pipe.hpp>
#include <iostream>
// Minimal stderr-free logging macro: streams `what` to stdout with a
// "[LOG] " prefix.  Wrapped in do { } while (0) (and without a trailing
// semicolon) so that `LOG(x);` expands to exactly one statement and is
// safe inside un-braced if/else branches.
#define LOG(what) \
    do { std::cout << "[LOG] " << what << std::endl; } while (0)
#include <atomic>
#include <condition_variable>
#include <cstdint>
#include <functional>
#include <future>
#include <memory>
#include <mutex>
#include <queue>
#include <sstream>
#include <stdexcept>
#include <string>
#include <system_error>
#include <thread>
#include <unordered_map>

#include <boost/asio.hpp>
#include <boost/process.hpp>
#include <msgpack.hpp>

namespace bp = boost::process;
// Asynchronous msgpack-RPC client for an embedded Neovim process.
//
// A dedicated thread runs the boost.asio io_context; the client owns the
// child `nvim` process and the async pipes wired to its stdin/stdout.
// Outgoing requests are matched to responses by their msgpack-RPC msgid;
// incoming notifications are dispatched to user-registered handlers.
//
// Thread-safety: request() and register_notification_handler() may be
// called from any thread; the read loop and handlers run on the io thread.
class NeovimClient {
public:
    // msgpack-RPC message kinds (first element of every RPC array).
    enum class MessageType {
        REQUEST = 0,
        RESPONSE = 1,
        NOTIFICATION = 2
    };

    // Decoded view of an incoming RPC message (kept public for callers).
    struct Message {
        MessageType type;
        uint32_t msgid;
        std::string method;
        msgpack::object params;
    };

    NeovimClient() : work_guard_(io_context_.get_executor()) {
        // The work guard keeps io_context_.run() from returning while no
        // async operation is in flight yet.
        io_thread_ = std::thread([this]() { io_context_.run(); });
        unpacker_.reserve_buffer(initial_buffer_size_);
    }

    ~NeovimClient() {
        stop();
        if (io_thread_.joinable()) {
            io_thread_.join();
        }
        if (nvim_process_) {
            // stop() already requested termination; reap the child so it
            // does not linger.  (The old code called terminate() a second
            // time on an already-dead process instead of waiting on it.)
            std::error_code ignored;
            nvim_process_->wait(ignored);
        }
    }

    // Spawns `nvim --embed --headless <file>` wired to async pipes and
    // starts the read loop.  Returns false on any launch failure.
    // `file` is now a parameter (defaulting to the historical hard-coded
    // path) so callers can open any file; existing callers are unaffected.
    bool connect_to_embedded(const std::string& file = "/tmp/lorem.txt") {
        try {
            LOG("will connect");
            // Create pipes for stdin and stdout.
            in_stream_ = std::make_unique<bp::async_pipe>(io_context_);
            out_stream_ = std::make_unique<bp::async_pipe>(io_context_);
            LOG("create pipes");
            // Launch the Neovim process.
            nvim_process_ = std::make_unique<bp::child>(
                io_context_,
                bp::search_path("nvim"),
                "--embed",
                "--headless",
                file,
                bp::std_in < *in_stream_,
                bp::std_out > *out_stream_);
            LOG("created process");
            // Start the asynchronous read loop.
            start_read();
            return true;
        } catch (const std::exception& e) {
            LOG(e.what());
            return false;
        }
    }

    // Sends [0, msgid, method, [args...]] and returns a future that
    // resolves with the `result` element of the matching response.  The
    // future throws std::runtime_error if Neovim reported an error, the
    // write failed, or the connection was lost.
    //
    // NOTE(review): the msgpack::object delivered through the future still
    // references the unpacker's internal zone; it must be consumed while
    // the client is alive.  TODO: migrate to msgpack::object_handle.
    template<typename... Args>
    std::future<msgpack::object> request(const std::string& method, Args&&... args) {
        // Atomic member counter replaces the old unsynchronized static
        // local, so concurrent callers cannot get duplicate msgids.
        const uint32_t msgid = ++next_msgid_;
        auto promise = std::make_shared<std::promise<msgpack::object>>();
        auto future = promise->get_future();

        // The buffer is shared with the completion handler: async_write
        // only borrows the memory, so the old stack-local sbuffer was
        // destroyed before the write completed (dangling-buffer UB).
        auto buffer = std::make_shared<msgpack::sbuffer>();
        msgpack::packer<msgpack::sbuffer> pk(buffer.get());
        // Pack the request: [type, msgid, method, params].
        pk.pack_array(4);
        pk.pack(static_cast<int>(MessageType::REQUEST));
        pk.pack(msgid);
        pk.pack(method);
        // Pack parameters.
        pk.pack_array(sizeof...(Args));
        (pk.pack(std::forward<Args>(args)), ...);

        {
            std::lock_guard<std::mutex> lock(pending_requests_mutex_);
            pending_requests_[msgid] = promise;
        }

        // Send the msgpack data.
        boost::asio::async_write(
            *in_stream_,
            boost::asio::buffer(buffer->data(), buffer->size()),
            [this, buffer, msgid](const boost::system::error_code& ec, std::size_t) {
                if (ec) {
                    // The request never reached Neovim, so no response
                    // will arrive: fail the promise instead of leaking it.
                    fail_request(msgid, "write failed: " + ec.message());
                }
            });
        return future;
    }

    // Registers (or replaces) the handler invoked for notifications with
    // the given method name.  The handler runs on the io thread.
    void register_notification_handler(
        const std::string& method,
        std::function<void(const msgpack::object&)> handler) {
        std::lock_guard<std::mutex> lock(notification_handlers_mutex_);
        notification_handlers_[method] = std::move(handler);
    }

    // Terminates the child (if still running), fails all outstanding
    // requests, and stops the io thread's event loop.  Safe to call twice.
    void stop() {
        if (nvim_process_ && nvim_process_->running()) {
            nvim_process_->terminate();
        }
        fail_all_requests("client stopped");
        io_context_.stop();
    }

private:
    // Arms one async read directly into the unpacker's reserve buffer
    // (avoids an intermediate copy).
    void start_read() {
        boost::asio::async_read(
            *out_stream_,
            boost::asio::buffer(unpacker_.buffer(), unpacker_.buffer_capacity()),
            boost::asio::transfer_at_least(1),
            [this](const boost::system::error_code& ec, std::size_t length) {
                handle_read(ec, length);
            });
    }

    // Feeds freshly-read bytes to the unpacker, dispatches every complete
    // message, then re-arms the read.
    void handle_read(const boost::system::error_code& ec, std::size_t length) {
        if (ec) {
            // Pipe closed or read failed: no further responses can ever
            // arrive, so break every outstanding promise.
            fail_all_requests("read error: " + ec.message());
            return;
        }
        // Tell the unpacker how many bytes were read.
        unpacker_.buffer_consumed(length);
        // Drain all complete messages currently buffered.
        msgpack::object_handle oh;
        while (unpacker_.next(oh)) {
            process_message(oh.get());
        }
        // Make sure the next read has a reasonably-sized buffer.
        if (unpacker_.buffer_capacity() < initial_buffer_size_) {
            unpacker_.reserve_buffer(initial_buffer_size_);
        }
        // Continue reading.
        start_read();
    }

    // Routes one decoded RPC message to the matching promise or handler.
    void process_message(const msgpack::object& obj) {
        try {
            if (obj.type != msgpack::type::ARRAY || obj.via.array.size < 3) {
                return;
            }
            const auto type =
                static_cast<MessageType>(obj.via.array.ptr[0].as<int>());
            LOG("got type: " << static_cast<int>(type));
            LOG("got obj: " << obj);
            switch (type) {
            case MessageType::RESPONSE: {
                // Responses are [1, msgid, error, result]: four elements.
                // (The old code checked size < 3 but indexed ptr[3].)
                if (obj.via.array.size < 4) {
                    return;
                }
                const uint32_t msgid = obj.via.array.ptr[1].as<uint32_t>();
                LOG("got msgid: " << msgid);
                std::shared_ptr<std::promise<msgpack::object>> promise;
                {
                    std::lock_guard<std::mutex> lock(pending_requests_mutex_);
                    auto it = pending_requests_.find(msgid);
                    if (it != pending_requests_.end()) {
                        promise = it->second;
                        pending_requests_.erase(it);
                    }
                }
                if (promise) {
                    const msgpack::object& error = obj.via.array.ptr[2];
                    if (error.is_nil()) {
                        promise->set_value(obj.via.array.ptr[3]);
                    } else {
                        // Surface Neovim's error object to the caller
                        // instead of silently delivering the nil result.
                        std::ostringstream what;
                        what << "nvim error: " << error;
                        promise->set_exception(std::make_exception_ptr(
                            std::runtime_error(what.str())));
                    }
                }
                break;
            }
            case MessageType::NOTIFICATION: {
                // Notifications are [2, method, params]: element 1 is the
                // method STRING.  (The old code parsed element 1 as a
                // msgid for every message type, which threw here and
                // silently dropped all notifications.)
                const std::string method =
                    obj.via.array.ptr[1].as<std::string>();
                std::function<void(const msgpack::object&)> handler;
                {
                    std::lock_guard<std::mutex> lock(notification_handlers_mutex_);
                    auto it = notification_handlers_.find(method);
                    if (it != notification_handlers_.end()) {
                        // Copy so user code runs outside the lock.
                        handler = it->second;
                    }
                }
                if (handler) {
                    handler(obj.via.array.ptr[2]);
                }
                break;
            }
            default:
                break;
            }
        } catch (const std::exception& e) {
            // Malformed message: log and keep the read loop alive.
            LOG("failed to process message: " << e.what());
        }
    }

    // Removes one pending request and delivers an exception to its future.
    void fail_request(uint32_t msgid, const std::string& reason) {
        std::shared_ptr<std::promise<msgpack::object>> promise;
        {
            std::lock_guard<std::mutex> lock(pending_requests_mutex_);
            auto it = pending_requests_.find(msgid);
            if (it != pending_requests_.end()) {
                promise = it->second;
                pending_requests_.erase(it);
            }
        }
        if (promise) {
            promise->set_exception(
                std::make_exception_ptr(std::runtime_error(reason)));
        }
    }

    // Fails every outstanding request with the given reason so waiters
    // are unblocked instead of hanging forever.
    void fail_all_requests(const std::string& reason) {
        std::unordered_map<uint32_t,
                           std::shared_ptr<std::promise<msgpack::object>>> pending;
        {
            std::lock_guard<std::mutex> lock(pending_requests_mutex_);
            pending.swap(pending_requests_);
        }
        for (auto& [id, promise] : pending) {
            (void)id;
            promise->set_exception(
                std::make_exception_ptr(std::runtime_error(reason)));
        }
    }

    static constexpr size_t initial_buffer_size_ = 4096;
    boost::asio::io_context io_context_;
    boost::asio::executor_work_guard<boost::asio::io_context::executor_type> work_guard_;
    std::thread io_thread_;
    std::unique_ptr<bp::child> nvim_process_;
    std::unique_ptr<bp::async_pipe> in_stream_;
    std::unique_ptr<bp::async_pipe> out_stream_;
    msgpack::unpacker unpacker_;
    std::atomic<uint32_t> next_msgid_{0};  // monotonically increasing RPC id
    std::mutex pending_requests_mutex_;
    std::unordered_map<uint32_t, std::shared_ptr<std::promise<msgpack::object>>> pending_requests_;
    std::mutex notification_handlers_mutex_;
    std::unordered_map<std::string, std::function<void(const msgpack::object&)>> notification_handlers_;
};
int main() {
NeovimClient client;
std::cout << "created client" << std::endl;
// Connect to embedded Neovim instance
if (!client.connect_to_embedded()) {
std::cerr << "Failed to connect" << std::endl;
return 1;
}
std::cout << "connected" << std::endl;
// Register notification handler
client.register_notification_handler("redraw", [](const msgpack::object& params) {
// Handle UI updates
});
// Make an async request
auto future = client.request("nvim_get_current_line");
auto future2 = client.request("nvim_get_current_buf");
auto future3 = client.request("nvim_get_current_win");
auto future4 = client.request("nvim_get_current_tabpage");
std::cout << "sent request" << std::endl;
try {
auto result = future.get(); // Wait for response
LOG("got result: " << result);
std::string line = result.as<std::string>();
LOG("got line");
std::cout << "Current line: " << line << std::endl;
} catch (const std::exception& e) {
std::cerr << "Error: " << e.what() << std::endl;
}
return 0;
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment