XWiki LiveData Filtering and Pagination

In the previous article we built a custom page and had LiveData load it, so that data is displayed in a live table. But there is a problem: when filtering, the results are not displayed correctly.
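
For context, the LiveData macro from the previous article points at the custom page roughly like this. This is a reconstruction from the request parameters shown in the next step, not the exact macro from that article: the id value is made up, and the resultPage reference is assumed from sourceParams.resultPage=xwiki:f.WebHome.

{{liveData
  id="regions"
  properties="id,name,level,parentId"
  source="liveTable"
  sourceParameters="resultPage=f.WebHome&translationPrefix=admin.livetable."
/}}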

1. Inspect the API request with F12

http://localhost:8080/xwiki/rest/liveData/sources/liveTable/entries?timestamp=1756188130870&namespace=wiki%3Axwiki&sourceParams.resultPage=xwiki%3Af.WebHome&sourceParams.translationPrefix=admin.livetable.&sourceParams.queryFilters=currentlanguage%2Chidden&properties=id&properties=name&properties=level&properties=parentId&offset=0&limit=15&filters.name=contains%3Af&sort=id&descending=

URL-decoded, this is:

http://localhost:8080/xwiki/rest/liveData/sources/liveTable/entries?timestamp=1756188130870&namespace=wiki:xwiki&sourceParams.resultPage=xwiki:f.WebHome&sourceParams.translationPrefix=admin.livetable.&sourceParams.queryFilters=currentlanguage,hidden&properties=id&properties=name&properties=level&properties=parentId&offset=0&limit=15&filters.name=contains:f&sort=id&descending=
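
The part that matters for filtering is filters.name=contains:f: LiveData sends the operator and the search term joined by a colon, so the results page has to strip the operator prefix before matching, which is what the replaceFirst(/^contains:/, '') call in the script below does. As a standalone illustration (the parseFilter helper is made up for this sketch, it is not an XWiki API):

def parseFilter(String raw) {
    // Split "contains:f" into operator and term; assume "contains" when no prefix is present
    def idx = raw.indexOf(':')
    if (idx >= 0) {
        return [op: raw.substring(0, idx), term: raw.substring(idx + 1)]
    }
    return [op: 'contains', term: raw]
}

assert parseFilter('contains:f') == [op: 'contains', term: 'f']
assert parseFilter('甘肃') == [op: 'contains', term: '甘肃']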

2. Update the earlier custom page

{{groovy}}
import groovy.json.JsonOutput
import org.slf4j.Logger
import java.net.URLDecoder

// Get an SLF4J logger
def logger = services.logging.getLogger("LiveTableScript")

// Set the JSON content type
if (request?.getParameter("xpage") == "plain") {
    response.setContentType("application/json")
}

// Mock database data (8 records, matching the earlier output)
def all = [
    [id: "620000000000000", name: "甘肃省", level: 1, parentId: "0"],
    [id: "621000000000000", name: "甘肃省1", level: 1, parentId: "0"],
    [id: "630000000000000", name: "青海省", level: 1, parentId: "0"],
    [id: "631000000000000", name: "青海省2", level: 1, parentId: "0"],
    [id: "640000000000000", name: "宁夏回族自治区", level: 1, parentId: "0"],
    [id: "641000000000000", name: "宁夏回族自治区3", level: 1, parentId: "0"],
    [id: "650000000000000", name: "新疆维吾尔自治区", level: 1, parentId: "0"],
    [id: "651000000000000", name: "新疆维吾尔自治区4", level: 1, parentId: "0"]
]

// Parse request parameters
def offset = (request?.getParameter("offset") ?: "0") as int
def limit = (request?.getParameter("limit") ?: "15") as int
// Try several possible parameter names and decode Chinese characters
def searchName = null
def rawSearch = request?.getParameter("filters.name") ?:
                request?.getParameter("filter.name") ?:
                request?.getParameter("name")
if (rawSearch) {
    searchName = URLDecoder.decode(rawSearch.replaceFirst(/^contains:/, '').trim(), "UTF-8")
}
def sort = request?.getParameter("sort") ?: "id"
def dir = request?.getParameter("dir")?.toLowerCase() ?: "asc"

// Write debug output to the XWiki log
logger.debug("Request parameters: {}", request?.parameterMap)
logger.debug("Search value: {}", searchName)

// Filtering
def filtered = all
if (searchName) {
    filtered = all.findAll { it.name.contains(searchName) }
    logger.debug("Filtered results: {}", filtered)
}

// Sorting
if (dir == "desc") {
    filtered = filtered.sort { a, b -> -(a[sort] ?: 0) <=> (b[sort] ?: 0) }
} else {
    filtered = filtered.sort { a, b -> (a[sort] ?: 0) <=> (b[sort] ?: 0) }
}

// Total count and page slice
def totalCount = filtered.size() ?: 0
def entries = filtered.drop(offset).take(limit)

// Build the Live Table JSON
def result = [
    totalrows: totalCount,
    returnedrows: entries.size(),
    offset: offset + 1,
    reqNo: (request?.getParameter("reqNo") ?: "1") as int,
    rows: entries.collect { entry ->
        def row = [
            id: entry.id,
            name: entry.name,
            level: entry.level,
            parentId: entry.parentId,
            doc_viewable: true
        ]
        return row
    }
]

// Output the JSON
print JsonOutput.toJson(result)
{{/groovy}}

3. Pagination issue

An interesting problem appeared here: searching for 甘肃 shows only one of the two matching records.

The debug log shows that the request arrives with offset set to 1, so the slice starts at index 1 and the first match is dropped. The offset XWiki LiveData passes to the results page is 1-based, so it has to be reduced by 1 to convert it to the list's 0-based index. A quick sketch of the off-by-one and the fixed script follow.
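
The sketch below uses the same drop/take slicing as the script; the two-element list stands in for the two 甘肃 records:

def filtered = [[name: '甘肃省'], [name: '甘肃省1']]
def rawOffset = 1  // 1-based offset as received from LiveData

// Buggy: using the 1-based offset directly skips the first match
assert filtered.drop(rawOffset).take(15)*.name == ['甘肃省1']

// Fixed: convert to a 0-based index first
def offset = rawOffset > 0 ? rawOffset - 1 : 0
assert filtered.drop(offset).take(15)*.name == ['甘肃省', '甘肃省1']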

{{groovy}}
import groovy.json.JsonOutput
import org.slf4j.Logger
import java.net.URLDecoder

def logger = services.logging.getLogger("LiveTableScript")

if (request?.getParameter("xpage") == "plain") {
    response.setContentType("application/json; charset=UTF-8")
}

def all = [
    [id: "620000000000000", name: "甘肃省", level: 1, parentId: "0"],
    [id: "621000000000000", name: "甘肃省1", level: 1, parentId: "0"],
    [id: "630000000000000", name: "青海省", level: 1, parentId: "0"],
    [id: "631000000000000", name: "青海省2", level: 1, parentId: "0"],
    [id: "640000000000000", name: "宁夏回族自治区", level: 1, parentId: "0"],
    [id: "641000000000000", name: "宁夏回族自治区3", level: 1, parentId: "0"],
    [id: "650000000000000", name: "新疆维吾尔自治区", level: 1, parentId: "0"],
    [id: "651000000000000", name: "新疆维吾尔自治区4", level: 1, parentId: "0"]
]

// ==== Parameter handling ====
// The offset XWiki LiveData sends is 1-based; subtract 1 to get a 0-based list index
def rawOffset = (request?.getParameter("offset") ?: "1") as int
def offset = rawOffset > 0 ? rawOffset - 1 : 0
def limit = (request?.getParameter("limit") ?: "15") as int
def sortField = request?.getParameter("sort") ?: "id"
def dir = request?.getParameter("descending") == "true" ? "desc" : "asc"
def rawSearch = request?.getParameter("filters.name") ?:
                request?.getParameter("filter.name") ?:
                request?.getParameter("name")
def searchName = rawSearch ? URLDecoder.decode(rawSearch.replaceFirst(/^contains:/, '').trim(), "UTF-8") : null

logger.debug("Request URL: {}", request?.requestURL)
logger.debug("Request parameters: {}", request?.parameterMap)
logger.debug("Raw offset parameter: {}", rawOffset)
logger.debug("Converted offset (0-based): {}", offset)
logger.debug("Limit: {}", limit)
logger.debug("Raw search parameter: {}", rawSearch)
logger.debug("Decoded search value: {}", searchName)
logger.debug("Sort field: {}", sortField)
logger.debug("Sort direction: {}", dir)

// ==== Filtering ====
def filtered = all
if (searchName) {
    filtered = all.findAll { record ->
        def normalizedName = record.name.trim()
        normalizedName.contains(searchName)
    }
    logger.debug("Filtered results: {}", filtered)
}

// ==== Sorting ====
if (filtered) {
    filtered = filtered.sort { a, b ->
        def valA = a[sortField] ?: (sortField == "name" ? "" : 0)
        def valB = b[sortField] ?: (sortField == "name" ? "" : 0)
        dir == "desc" ? -(valA <=> valB) : (valA <=> valB)
    }
}

// ==== Pagination ====
def totalCount = filtered.size() ?: 0
def entries = filtered.drop(offset).take(limit)

logger.debug("Paginated entries: {}", entries)

// ==== Output ====
def result = [
    totalrows   : totalCount,
    returnedrows: entries.size(),
    offset      : rawOffset,  
    reqNo       : (request?.getParameter("reqNo") ?: "1") as int,
    rows        : entries.collect { entry ->
        [
            id         : entry.id,
            name       : entry.name,
            level      : entry.level,
            parentId   : entry.parentId,
            doc_viewable: true
        ]
    }
]

print JsonOutput.toJson(result)
{{/groovy}}
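
For reference, the same conversion keeps multi-page requests consistent. With the 8 mock records and a page size of 5, page 1 and page 2 arrive with 1-based offsets 1 and 6 (assuming the offset behaviour observed in the log above), and the decremented offsets slice the list correctly:

def all = (1..8).collect { [id: it] }
def limit = 5

[1, 6].each { rawOffset ->  // 1-based offsets for page 1 and page 2
    def offset = rawOffset > 0 ? rawOffset - 1 : 0
    println "offset=${rawOffset} -> ${all.drop(offset).take(limit)*.id}"
    // prints offset=1 -> [1, 2, 3, 4, 5] and offset=6 -> [6, 7, 8]
}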

Filtering and pagination now work; a later post will hook the page up to real data.
