Skip to content
Snippets Groups Projects

Draft: Feature/mongo db

Open Joana Plewnia requested to merge feature/MongoDB into master
Comparing 11 files: +601 −234
Compare changes
  • Side-by-side
  • Inline
Files
11
@@ -39,13 +39,69 @@ namespace armarx::armem::server::ltm::util::mongodb
return ret;
}
// Only required for MongoCXX versions < 3.5.0 (which provide no
// client::list_database_names()).
/// Lists the names of all databases known to \p client by iterating the
/// info documents returned by client.list_databases().
std::vector<std::string>
list_database_names(mongocxx::client& client)
{
    std::vector<std::string> names;
    for (auto&& info : client.list_databases())
    {
        // Each info document carries the database name under "name".
        names.emplace_back(std::string(info["name"].get_utf8().value));
    }
    return names;
}
// Only required for MongoCXX versions < 3.4.0 (which provide no
// database::list_collection_names()).
/// Lists the names of all collections in \p db by iterating the
/// info documents returned by db.list_collections().
std::vector<std::string>
list_collection_names(mongocxx::database& db)
{
    std::vector<std::string> collection_names;
    // Fix: removed a stray double semicolon after this call.
    auto collections = db.list_collections();
    for (auto&& info : collections)
    {
        // Each info document carries the collection name under "name".
        collection_names.push_back(std::string(info["name"].get_utf8().value));
    }
    return collection_names;
}
bool
insert(mongocxx::collection& coll, const nlohmann::json& value)
insert(mongocxx::database& db, mongocxx::collection& coll, const nlohmann::json& value)
{
nlohmann::json json = value;
std::string v = value.dump();
auto q = bsoncxx::from_json(v);
auto res = coll.insert_one(q.view());
size_t document_size = q.view().length();
if (document_size > 16 * 1024 * 1024) { // 16 MB Document
ARMARX_INFO << deactivateSpam() << "Document size exceeds the 16MB limit: " << document_size << " bytes." ;
json.erase("data");
q = bsoncxx::from_json(json.dump());
/*
auto bucket = db.gridfs_bucket();
bsoncxx::stdx::string_view collection_name = coll.name();
std::string id_str = value["_id"].get<std::string>();
std::string filename_str = std::string(collection_name) + "." + id_str;
bsoncxx::stdx::string_view filename_view(filename_str);
auto uploader = bucket.open_upload_stream(filename_view);
uploader.write(reinterpret_cast<const std::uint8_t*>(v.c_str()), v.size());
auto result = uploader.close();
bsoncxx::builder::stream::document doc_builder{};
doc_builder << "_id" << id_str
<< "gridfs_id" << result.id();
auto res = coll.insert_one(doc_builder.view());
*/
return false;
}
auto res = coll.insert_one(q.view());
return (bool)res;
}
@@ -79,16 +135,22 @@ namespace armarx::armem::server::ltm::util::mongodb
}
} // namespace detail
/// Returns the database \p databaseName if it exists on \p client,
/// std::nullopt otherwise.
///
/// Note: this merge collapsed the old and the new definition that the diff
/// rendered on top of each other into the single new one.
std::optional<mongocxx::database>
databaseExists(mongocxx::client& client, const std::string& databaseName)
{
    // MongoCXX >= 3.5.0 offers client.list_database_names() directly;
    // detail::list_database_names() is the fallback for older versions.
    const auto names = detail::list_database_names(client);
    if (std::find(names.begin(), names.end(), databaseName) != names.end())
    {
        // Note: client[databaseName] does NOT create the database on the
        // server — a database only materializes once a document is inserted.
        return client[databaseName];
    }
    return std::nullopt;
}
mongocxx::database
ensureDatabaseExists(mongocxx::client& client,
@@ -104,12 +166,16 @@ namespace armarx::armem::server::ltm::util::mongodb
databaseName);
}
}
//Workaround to create an empty Database client[databaseName] does not create a database if nothing is added
//db = client[databaseName];
//db.create_collection("empty");
return client[databaseName];
}
std::optional<mongocxx::collection>
collectionExists(mongocxx::database& db, const std::string& collectionName)
{
//ARMARX_INFO << "CollectionName: " << collectionName;
if (db.has_collection(collectionName))
{
return db[collectionName];
@@ -131,6 +197,7 @@ namespace armarx::armem::server::ltm::util::mongodb
collectionName);
}
}
//Workaround to create an empty Collection db[collectionName] does not create a Collection if no document is added
return db[collectionName];
}
@@ -168,9 +235,41 @@ namespace armarx::armem::server::ltm::util::mongodb
ss << "." << detail::escapeName(id.entityName);
}
//ARMARX_INFO << deactivateSpam() << ss.str();
return ss.str();
}
/*unused
std::vector<std::string>
getDirectSuccessors(const std::vector<std::string>& collection_names, const std::string& prefix) {
std::vector<std::string> direct_successors;
for (const auto& coll_name : collection_names) {
auto segments = split(coll_name, ".");
if (segments.size() > collectionNameDepth) {
std::string current_prefix;
for (size_t i = 0; i < collectionNameDepth; ++i) {
if (i > 0) {
current_prefix += ".";
}
current_prefix += segments[i];
}
if (current_prefix == prefix) {
direct_successors.push_back(segments[collectionNameDepth]);
}
}
}
// Removing duplicates
std::set<std::string> unique_successors(direct_successors.begin(), direct_successors.end());
direct_successors.assign(unique_successors.begin(), unique_successors.end());
return direct_successors;
}
std::vector<std::string>
getSubCollections(mongocxx::database& db, const armem::MemoryID& id) {
return getDirectSuccessors(detail::list_collection_names(db), toCollectionName(id));
}
*/
std::optional<nlohmann::json>
documentExists(mongocxx::collection& collection, const nlohmann::json& json)
{
@@ -178,7 +277,7 @@ namespace armarx::armem::server::ltm::util::mongodb
}
nlohmann::json
ensureDocumentExists(mongocxx::collection& collection,
ensureDocumentExists(mongocxx::database& db, mongocxx::collection& collection,
const nlohmann::json& json,
bool createIfNotExistent)
{
@@ -194,7 +293,7 @@ namespace armarx::armem::server::ltm::util::mongodb
if (!op)
{
detail::insert(collection, json);
detail::insert(db, collection, json);
}
auto find = detail::contains(collection, json);
@@ -218,7 +317,7 @@ namespace armarx::armem::server::ltm::util::mongodb
}
void
writeDataToDocument(mongocxx::collection& collection,
writeDataToDocument(mongocxx::database& db, mongocxx::collection& collection,
const nlohmann::json& query,
const nlohmann::json& update)
{
@@ -231,21 +330,57 @@ namespace armarx::armem::server::ltm::util::mongodb
nlohmann::json full;
full.update(query);
full.update(update);
detail::insert(collection, full);
detail::insert(db, collection, full);
}
}
std::vector<nlohmann::json>
getAllDocuments(mongocxx::collection& collection)
getAllDocuments(mongocxx::database& db, mongocxx::collection& collection)
{
std::vector<nlohmann::json> ret;
mongocxx::cursor cursor = collection.find({});
for (const auto& doc : cursor)
{
ret.push_back(
nlohmann::json::parse(bsoncxx::to_json(doc, bsoncxx::ExtendedJsonMode::k_relaxed)));
}
return ret;
mongocxx::cursor cursor = collection.find({});
//auto bucket = db.gridfs_bucket();
for (const auto& doc : cursor) {
nlohmann::json json_doc = nlohmann::json::parse(bsoncxx::to_json(doc, bsoncxx::ExtendedJsonMode::k_relaxed));
/*
if (doc["gridfs_id"]) {
bsoncxx::types::value gridfs_id = doc["gridfs_id"].get_value();
try {
auto downloader = bucket.open_download_stream(gridfs_id);
// Die heruntergeladene Datei in einen String lesen
std::string gridfs_data;
std::uint8_t buffer[4096]; // 4KB Buffer
std::int64_t bytes_read;
while ((bytes_read = downloader.read(buffer, sizeof(buffer))) > 0) {
gridfs_data.append(reinterpret_cast<const char*>(buffer), bytes_read);
}
// Die Datei wird geschlossen, nachdem der Stream vollständig gelesen wurde
downloader.close();
// Fügen Sie die GridFS-Daten direkt unter dem Namen "data" zum JSON-Dokument hinzu
json_doc["data"] = gridfs_data; // Speichern als String oder ggf. Base64
// Entferne den GridFS-Verweis, falls gewünscht
json_doc.erase("gridfs_id");
} catch (const std::exception& e) {
ARMARX_ERROR << "Error while downloading from GridFS: " << e.what() << std::endl;
json_doc["data"] = "Error retrieving GridFS data";
}
}*/
ret.push_back(json_doc);
}
return ret;
}
} // namespace armarx::armem::server::ltm::util::mongodb
Loading