Add comprehensive development roadmap via GitHub Issues
Created 10 detailed GitHub issues covering: - Project activation and management UI (#1-2) - Worker node coordination and visualization (#3-4) - Automated GitHub repository scanning (#5) - Intelligent model-to-issue matching (#6) - Multi-model task execution system (#7) - N8N workflow integration (#8) - Hive-Bzzz P2P bridge (#9) - Peer assistance protocol (#10) Each issue includes detailed specifications, acceptance criteria, technical implementation notes, and dependency mapping. 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
76
mcp-server/node_modules/lunr/lib/tokenizer.js
generated
vendored
Normal file
76
mcp-server/node_modules/lunr/lib/tokenizer.js
generated
vendored
Normal file
@@ -0,0 +1,76 @@
|
||||
/*!
|
||||
* lunr.tokenizer
|
||||
* Copyright (C) @YEAR Oliver Nightingale
|
||||
*/
|
||||
|
||||
/**
 * A function for splitting a string into tokens ready to be inserted into
 * the search index. Uses `lunr.tokenizer.separator` to split strings, change
 * the value of this property to change how strings are split into tokens.
 *
 * This tokenizer will convert its parameter to a string by calling `toString` and
 * then will split this string on the character in `lunr.tokenizer.separator`.
 * Arrays will have their elements converted to strings and wrapped in a lunr.Token.
 *
 * Optional metadata can be passed to the tokenizer, this metadata will be cloned and
 * added as metadata to every token that is created from the object to be tokenized.
 *
 * @static
 * @param {?(string|object|object[])} obj - The object to convert into tokens
 * @param {?object} metadata - Optional metadata to associate with every token
 * @returns {lunr.Token[]}
 * @see {@link lunr.Pipeline}
 */
lunr.tokenizer = function (obj, metadata) {
  // Loose equality against null matches both null and undefined, so a
  // separate `obj == undefined` check is redundant.
  if (obj == null) {
    return [];
  }

  // Arrays: each element becomes a Token with a clone of the shared
  // metadata; no position/index information is recorded in this path.
  if (Array.isArray(obj)) {
    return obj.map(function (t) {
      return new lunr.Token(
        lunr.utils.asString(t).toLowerCase(),
        lunr.utils.clone(metadata)
      );
    });
  }

  var str = obj.toString().toLowerCase();
  var len = str.length;
  var tokens = [];

  // Scan one position past the end of the string (sliceEnd <= len) so the
  // trailing token, which is not followed by a separator, is still flushed.
  for (var sliceEnd = 0, sliceStart = 0; sliceEnd <= len; sliceEnd++) {
    var char = str.charAt(sliceEnd);
    var sliceLength = sliceEnd - sliceStart;

    if (char.match(lunr.tokenizer.separator) || sliceEnd == len) {
      if (sliceLength > 0) {
        // Each token carries its own metadata clone plus position
        // ([start, length] in the source string) and token index.
        var tokenMetadata = lunr.utils.clone(metadata) || {};
        tokenMetadata["position"] = [sliceStart, sliceLength];
        tokenMetadata["index"] = tokens.length;

        tokens.push(
          new lunr.Token(
            str.slice(sliceStart, sliceEnd),
            tokenMetadata
          )
        );
      }

      sliceStart = sliceEnd + 1;
    }
  }

  return tokens;
};
|
||||
|
||||
/**
 * The separator used to split a string into tokens. Override this property
 * to change how `lunr.tokenizer` splits strings into tokens. By default
 * this splits on whitespace and hyphens.
 *
 * @static
 * @see lunr.tokenizer
 */
lunr.tokenizer.separator = /[\s\-]+/
|
||||
Reference in New Issue
Block a user