TL;DR Summary
With modern MongoDB releases you can brute force this with $slice just off the basic aggregation result. For "large" results, run parallel queries instead for each grouping (a demonstration listing is at the end of the answer), or wait for SERVER-9377 to resolve, which would allow a "limit" on the number of items to $push into an array.
db.books.aggregate([
{ "$group": {
"_id": {
"addr": "$addr",
"book": "$book"
},
"bookCount": { "$sum": 1 }
}},
{ "$group": {
"_id": "$_id.addr",
"books": {
"$push": {
"book": "$_id.book",
"count": "$bookCount"
}
},
"count": { "$sum": "$bookCount" }
}},
{ "$sort": { "count": -1 } },
{ "$limit": 2 },
{ "$project": {
"books": { "$slice": [ "$books", 2 ] },
"count": 1
}}
])
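One thing to note is that the $push here accumulates the "books" entries in encounter order rather than by their count, so the $slice takes the first two entries pushed. If each "books" array should itself be ordered by descending count before slicing, the same $unwind/$sort/$group shuffle used in the longer listing later in this answer can be applied first; a sketch:
db.books.aggregate([
  { "$group": {
    "_id": {
      "addr": "$addr",
      "book": "$book"
    },
    "bookCount": { "$sum": 1 }
  }},
  { "$group": {
    "_id": "$_id.addr",
    "books": {
      "$push": {
        "book": "$_id.book",
        "count": "$bookCount"
      }
    },
    "count": { "$sum": "$bookCount" }
  }},
  { "$sort": { "count": -1 } },
  { "$limit": 2 },
  // Re-order the entries within each grouping by per-book count
  { "$unwind": "$books" },
  { "$sort": { "count": -1, "books.count": -1 } },
  { "$group": {
    "_id": "$_id",
    "books": { "$push": "$books" },
    "count": { "$first": "$count" }
  }},
  { "$sort": { "count": -1 } },
  { "$project": {
    "books": { "$slice": [ "$books", 2 ] },
    "count": 1
  }}
])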
MongoDB 3.6 Preview
Still not resolving SERVER-9377, but in this release $lookup allows a new "non-correlated" option which takes a "pipeline" expression as an argument instead of the "localField" and "foreignField" options. This then allows a "self-join" with another pipeline expression, in which we can apply $limit in order to return the "top-n" results.
db.books.aggregate([
{ "$group": {
"_id": "$addr",
"count": { "$sum": 1 }
}},
{ "$sort": { "count": -1 } },
{ "$limit": 2 },
{ "$lookup": {
"from": "books",
"let": {
"addr": "$_id"
},
"pipeline": [
{ "$match": {
"$expr": { "$eq": [ "$addr", "$$addr"] }
}},
{ "$group": {
"_id": "$book",
"count": { "$sum": 1 }
}},
{ "$sort": { "count": -1 } },
{ "$limit": 2 }
],
"as": "books"
}}
])
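For reference, running this against the books.json sample data listed at the end of the answer should return something along these lines, since address1 holds 4 entries and address2 holds 3, with book1 the most common in both:
[
  {
    "_id": "address1",
    "count": 4,
    "books": [
      { "_id": "book1", "count": 3 },
      { "_id": "book5", "count": 1 }
    ]
  },
  {
    "_id": "address2",
    "count": 3,
    "books": [
      { "_id": "book1", "count": 2 },
      { "_id": "book5", "count": 1 }
    ]
  }
]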
Of course the other addition here is the ability to interpolate the variable through $expr, using $match to select the matching items in the "join", but the general premise is a "pipeline within a pipeline", where the inner content can be filtered by matches from the parent. Since both are themselves "pipelines", we can $limit each result separately.
This would be the next best option to running parallel queries, and would actually be better if the $match were allowed and able to use an index in the "sub-pipeline" processing. So whilst it does not apply a "limit to $push" as the referenced issue asks, it actually delivers something that should work better.
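Should that index usage ever arrive, the natural candidate is a plain index on the join key; a minimal sketch, with the caveat that whether the sub-pipeline can actually use it depends on the server version:
// Index on the "addr" field the $expr match in the sub-pipeline filters on
db.books.createIndex({ "addr": 1 })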
Original Answer
You appear to have stumbled upon the top "N" problem. In a way your problem is fairly easy to solve, though not with the exact limiting that you ask for:
db.books.aggregate([
{ "$group": {
"_id": {
"addr": "$addr",
"book": "$book"
},
"bookCount": { "$sum": 1 }
}},
{ "$group": {
"_id": "$_id.addr",
"books": {
"$push": {
"book": "$_id.book",
"count": "$bookCount"
}
},
"count": { "$sum": "$bookCount" }
}},
{ "$sort": { "count": -1 } },
{ "$limit": 2 }
])
Now that will give you a result like this:
{
"result" : [
{
"_id" : "address1",
"books" : [
{
"book" : "book4",
"count" : 1
},
{
"book" : "book5",
"count" : 1
},
{
"book" : "book1",
"count" : 3
}
],
"count" : 5
},
{
"_id" : "address2",
"books" : [
{
"book" : "book5",
"count" : 1
},
{
"book" : "book1",
"count" : 2
}
],
"count" : 3
}
],
"ok" : 1
}
So that does differ from what you are asking in that, while we do get the top results for the address values, the underlying "books" selection is not limited to only a required amount of results.
This turns out to be very difficult to do, but it can be done, though the complexity just increases with the number of items you need to match. To keep it simple we can keep this at 2 matches at most:
db.books.aggregate([
{ "$group": {
"_id": {
"addr": "$addr",
"book": "$book"
},
"bookCount": { "$sum": 1 }
}},
{ "$group": {
"_id": "$_id.addr",
"books": {
"$push": {
"book": "$_id.book",
"count": "$bookCount"
}
},
"count": { "$sum": "$bookCount" }
}},
{ "$sort": { "count": -1 } },
{ "$limit": 2 },
{ "$unwind": "$books" },
{ "$sort": { "count": 1, "books.count": -1 } },
{ "$group": {
"_id": "$_id",
"books": { "$push": "$books" },
"count": { "$first": "$count" }
}},
{ "$project": {
"_id": {
"_id": "$_id",
"books": "$books",
"count": "$count"
},
"newBooks": "$books"
}},
{ "$unwind": "$newBooks" },
{ "$group": {
"_id": "$_id",
"num1": { "$first": "$newBooks" }
}},
{ "$project": {
"_id": "$_id",
"newBooks": "$_id.books",
"num1": 1
}},
{ "$unwind": "$newBooks" },
{ "$project": {
"_id": "$_id",
"num1": 1,
"newBooks": 1,
"seen": { "$eq": [
"$num1",
"$newBooks"
]}
}},
{ "$match": { "seen": false } },
{ "$group":{
"_id": "$_id._id",
"num1": { "$first": "$num1" },
"num2": { "$first": "$newBooks" },
"count": { "$first": "$_id.count" }
}},
{ "$project": {
"num1": 1,
"num2": 1,
"count": 1,
"type": { "$cond": [ 1, [true,false],0 ] }
}},
{ "$unwind": "$type" },
{ "$project": {
"books": { "$cond": [
"$type",
"$num1",
"$num2"
]},
"count": 1
}},
{ "$group": {
"_id": "$_id",
"count": { "$first": "$count" },
"books": { "$push": "$books" }
}},
{ "$sort": { "count": -1 } }
])
So that will actually give you the top 2 "books" from the top two "address" entries.
But for my money, stick with the first form and then simply $slice the array elements that are returned to take the first "N" elements.
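Concretely, that just means appending the $project stage from the TL;DR listing onto the end of the first pipeline:
{ "$project": {
  "books": { "$slice": [ "$books", 2 ] },
  "count": 1
}}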
Demonstration Code
The demonstration code is appropriate for usage with current LTS versions of NodeJS from the v8.x and v10.x releases. That is mostly for the async/await syntax, but there is nothing within the general flow that has any such restriction, and it adapts with little alteration to plain promises or even back to a plain callback implementation.
index.js
const { MongoClient } = require('mongodb');
const fs = require('mz/fs');
const uri = 'mongodb://localhost:27017';
const log = data => console.log(JSON.stringify(data, undefined, 2));
(async function() {
try {
const client = await MongoClient.connect(uri);
const db = client.db('bookDemo');
const books = db.collection('books');
let { version } = await db.command({ buildInfo: 1 });
version = parseFloat(version.match(new RegExp(/(?:(?!-).)*/))[0]);
// Clear and load books
await books.deleteMany({});
await books.insertMany(
(await fs.readFile('books.json'))
.toString()
.replace(/\n$/,"")
.split("\n")
.map(JSON.parse)
);
if ( version >= 3.6 ) {
// Non-correlated pipeline with limits
let result = await books.aggregate([
{ "$group": {
"_id": "$addr",
"count": { "$sum": 1 }
}},
{ "$sort": { "count": -1 } },
{ "$limit": 2 },
{ "$lookup": {
"from": "books",
"as": "books",
"let": { "addr": "$_id" },
"pipeline": [
{ "$match": {
"$expr": { "$eq": [ "$addr", "$$addr" ] }
}},
{ "$group": {
"_id": "$book",
"count": { "$sum": 1 },
}},
{ "$sort": { "count": -1 } },
{ "$limit": 2 }
]
}}
]).toArray();
log({ result });
}
// Serial result processing with parallel fetch
// First get top addr items
let topaddr = await books.aggregate([
{ "$group": {
"_id": "$addr",
"count": { "$sum": 1 }
}},
{ "$sort": { "count": -1 } },
{ "$limit": 2 }
]).toArray();
// Run parallel top books for each addr
let topbooks = await Promise.all(
topaddr.map(({ _id: addr }) =>
books.aggregate([
{ "$match": { addr } },
{ "$group": {
"_id": "$book",
"count": { "$sum": 1 }
}},
{ "$sort": { "count": -1 } },
{ "$limit": 2 }
]).toArray()
)
);
// Merge output
topaddr = topaddr.map((d,i) => ({ ...d, books: topbooks[i] }));
log({ topaddr });
client.close();
} catch(e) {
console.error(e)
} finally {
process.exit()
}
})()
books.json
{ "addr": "address1", "book": "book1" }
{ "addr": "address2", "book": "book1" }
{ "addr": "address1", "book": "book5" }
{ "addr": "address3", "book": "book9" }
{ "addr": "address2", "book": "book5" }
{ "addr": "address2", "book": "book1" }
{ "addr": "address1", "book": "book1" }
{ "addr": "address15", "book": "book1" }
{ "addr": "address9", "book": "book99" }
{ "addr": "address90", "book": "book33" }
{ "addr": "address4", "book": "book3" }
{ "addr": "address5", "book": "book1" }
{ "addr": "address77", "book": "book11" }
{ "addr": "address1", "book": "book1" }