Python pygments.lexer module: do_insertions() example source code
The following code examples, extracted from open-source Python projects, illustrate how to use pygments.lexer.do_insertions().
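do_insertions(insertions, tokens) is a helper for lexers that need to weave extra tokens, typically console prompts, into the output of a sub-lexer. insertions is a list of (index, itokens) pairs: index is a character offset into the text handed to the sub-lexer, and itokens is a list of (position, tokentype, value) triples to splice in at that offset; the helper yields ordinary (position, tokentype, value) triples with all offsets rebased. A minimal self-contained sketch of the pattern (the sample input below is made up for illustration, not taken from the projects on this page):

    from pygments.lexer import do_insertions
    from pygments.lexers import PythonLexer
    from pygments.token import Generic

    code = 'x = 1\n'
    # Splice a prompt token in at offset 0 of the sub-lexed code.
    insertions = [(0, [(0, Generic.Prompt, '>>> ')])]
    for index, token, value in do_insertions(
            insertions, PythonLexer().get_tokens_unprocessed(code)):
        print(index, token, repr(value))

All of the examples below follow this shape inside a get_tokens_unprocessed() method: buffer the code that follows each prompt, record the prompts as insertions, and flush the buffer through the sub-lexer whenever a non-prompt line appears.

Example 1. A console-session lexer for Dylan (this appears to be Pygments' DylanConsoleLexer): prompt matches become Generic.Prompt insertions, the remainder of each prompt line is buffered and re-lexed with DylanLexer, and non-prompt lines are emitted as Generic.Output.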
def get_tokens_unprocessed(self, text):
    dylexer = DylanLexer(**self.options)

    curcode = ''
    insertions = []
    for match in self._line_re.finditer(text):
        line = match.group()
        m = self._prompt_re.match(line)
        if m is not None:
            end = m.end()
            insertions.append((len(curcode),
                               [(0, Generic.Prompt, line[:end])]))
            curcode += line[end:]
        else:
            if curcode:
                for item in do_insertions(insertions,
                                          dylexer.get_tokens_unprocessed(curcode)):
                    yield item
                curcode = ''
                insertions = []
            yield match.start(), Generic.Output, line
    if curcode:
        for item in do_insertions(insertions,
                                  dylexer.get_tokens_unprocessed(curcode)):
            yield item
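Example 2. The same pattern for a Julia REPL session (apparently Pygments' JuliaConsoleLexer): 'julia>' prompts become Generic.Prompt insertions, indented continuation lines are recorded as Generic.Traceback, and anything else is emitted as output.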
def get_tokens_unprocessed(self, text):
    jllexer = JuliaLexer(**self.options)

    curcode = ''
    insertions = []
    for match in line_re.finditer(text):
        line = match.group()
        if line.startswith('julia>'):
            insertions.append((len(curcode),
                               [(0, Generic.Prompt, line[:6])]))
            curcode += line[6:]
        elif line.startswith('      '):
            idx = len(curcode)
            # without is showing error on same line as before...?
            line = "\n" + line
            token = (0, Generic.Traceback, line)
            insertions.append((idx, [token]))
        else:
            if curcode:
                for item in do_insertions(
                        insertions, jllexer.get_tokens_unprocessed(curcode)):
                    yield item
                curcode = ''
                insertions = []
            yield match.start(), Generic.Output, line
    if curcode:  # or item:
        for item in do_insertions(
                insertions, jllexer.get_tokens_unprocessed(curcode)):
            yield item
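Example 3. An R console-session lexer built on SLexer (apparently Pygments' RConsoleLexer): '>' and '+' prompts are colorized via insertions while the buffered code block is re-lexed as S/R.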
def get_tokens_unprocessed(self, text):
    slexer = SLexer(**self.options)

    current_code_block = ''
    insertions = []
    for match in line_re.finditer(text):
        line = match.group()
        if line.startswith('>') or line.startswith('+'):
            # Colorize the prompt as such,
            # then put rest of line into current_code_block
            insertions.append((len(current_code_block),
                               [(0, Generic.Prompt, line[:2])]))
            current_code_block += line[2:]
        else:
            # We have reached a non-prompt line!
            # If we have stored prompt lines, need to process them first.
            if current_code_block:
                # Weave together the prompts and highlight code.
                for item in do_insertions(
                        insertions, slexer.get_tokens_unprocessed(current_code_block)):
                    yield item
                # Reset vars for next code block.
                current_code_block = ''
                insertions = []
            # Now process the actual line itself, this is output from R.
            yield match.start(), Generic.Output, line

    # If we happen to end on a code block with nothing after it, need to
    # process the last code block. This is neither elegant nor DRY so
    # should be changed.
    if current_code_block:
        for item in do_insertions(
                insertions, slexer.get_tokens_unprocessed(current_code_block)):
            yield item
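Example 4. A shell-session lexer (apparently Pygments' BashSessionLexer). Unlike the previous examples it yields (pos + i, t, v) triples itself rather than forwarding whole items, rebasing the sub-lexer's offsets onto the position of the first prompt line of each block.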
def get_tokens_unprocessed(self, text):
    bashlexer = BashLexer(**self.options)

    pos = 0
    curcode = ''
    insertions = []
    for match in line_re.finditer(text):
        line = match.group()
        m = re.match(r'^((?:\(\S+\))?(?:|sh\S*?|\w+\S+[@:]\S+(?:\s+\S+)'
                     r'?|\[\S+[@:][^\n]+\].+)[$#%])(.*\n?)', line)
        if m:
            # To support output lexers (say diff output), the output
            # needs to be broken by prompts whenever the output lexer
            # changes.
            if not insertions:
                pos = match.start()
            insertions.append((len(curcode),
                               [(0, Generic.Prompt, m.group(1))]))
            curcode += m.group(2)
        elif line.startswith('>'):
            insertions.append((len(curcode),
                               [(0, Generic.Prompt, line[:1])]))
            curcode += line[1:]
        else:
            if insertions:
                toks = bashlexer.get_tokens_unprocessed(curcode)
                for i, t, v in do_insertions(insertions, toks):
                    yield pos + i, t, v
            yield match.start(), Generic.Output, line
            insertions = []
            curcode = ''
    if insertions:
        for i, t, v in do_insertions(insertions,
                                     bashlexer.get_tokens_unprocessed(curcode)):
            yield pos + i, t, v
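Example 5. A SQLite interactive-session lexer (apparently Pygments' SqliteConsoleLexer). Both the 'sqlite> ' and '   ...> ' prompts are eight characters wide, so line[:8]/line[8:] cleanly separates prompt from code.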
def get_tokens_unprocessed(self, data):
    sql = SqlLexer(**self.options)

    curcode = ''
    insertions = []
    for match in line_re.finditer(data):
        line = match.group()
        if line.startswith('sqlite> ') or line.startswith('   ...> '):
            insertions.append((len(curcode),
                               [(0, Generic.Prompt, line[:8])]))
            curcode += line[8:]
        else:
            if curcode:
                for item in do_insertions(insertions,
                                          sql.get_tokens_unprocessed(curcode)):
                    yield item
                curcode = ''
                insertions = []
            if line.startswith('SQL error: '):
                yield (match.start(), Generic.Traceback, line)
            else:
                yield (match.start(), Generic.Output, line)
    if curcode:
        for item in do_insertions(insertions,
                                  sql.get_tokens_unprocessed(curcode)):
            yield item
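Example 6. An Erlang shell lexer (apparently Pygments' ErlangShellLexer), matching prompts with a precompiled _prompt_re and emitting lines that start with '*' as Generic.Traceback.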
def get_tokens_unprocessed(self, text):
    erlexer = ErlangLexer(**self.options)

    curcode = ''
    insertions = []
    for match in line_re.finditer(text):
        line = match.group()
        m = self._prompt_re.match(line)
        if m is not None:
            end = m.end()
            insertions.append((len(curcode),
                               [(0, Generic.Prompt, line[:end])]))
            curcode += line[end:]
        else:
            if curcode:
                for item in do_insertions(insertions,
                                          erlexer.get_tokens_unprocessed(curcode)):
                    yield item
                curcode = ''
                insertions = []
            if line.startswith('*'):
                yield match.start(), Generic.Traceback, line
            else:
                yield match.start(), Generic.Output, line
    if curcode:
        for item in do_insertions(insertions,
                                  erlexer.get_tokens_unprocessed(curcode)):
            yield item
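Example 7. An IPython-style console lexer (this variant appears in IPython's Sphinx highlighting extension): comment lines, input prompts, continuation prompts, and output prompts each become different insertion tokens, and the buffered code is re-lexed with PythonLexer. Note that tblexer is created but unused in this excerpt.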
def get_tokens_unprocessed(self, text):
    pylexer = PythonLexer(**self.options)
    tblexer = PythonTracebackLexer(**self.options)

    curcode = ''
    insertions = []
    for match in line_re.finditer(text):
        line = match.group()
        input_prompt = self.input_prompt.match(line)
        continue_prompt = self.continue_prompt.match(line.rstrip())
        output_prompt = self.output_prompt.match(line)
        if line.startswith("#"):
            insertions.append((len(curcode),
                               [(0, Comment, line)]))
        elif input_prompt is not None:
            insertions.append((len(curcode),
                               [(0, Generic.Prompt, input_prompt.group())]))
            curcode += line[input_prompt.end():]
        elif continue_prompt is not None:
            insertions.append((len(curcode),
                               [(0, Generic.Prompt, continue_prompt.group())]))
            curcode += line[continue_prompt.end():]
        elif output_prompt is not None:
            # Use the 'error' token for output. We should probably make
            # our own token, but error is typically in a bright color like
            # red, so it works fine for our output prompts.
            insertions.append((len(curcode),
                               [(0, Generic.Error, output_prompt.group())]))
            curcode += line[output_prompt.end():]
        else:
            if curcode:
                for item in do_insertions(insertions,
                                          pylexer.get_tokens_unprocessed(curcode)):
                    yield item
                curcode = ''
                insertions = []
            yield match.start(), Generic.Output, line
    if curcode:
        for item in do_insertions(insertions,
                                  pylexer.get_tokens_unprocessed(curcode)):
            yield item