Software for installing a smart mirror framework on a Raspberry Pi, used to display university-related information.

subtokenize.js 4.9KB

'use strict'

var assign = require('../constant/assign.js')
var chunkedSplice = require('./chunked-splice.js')
var shallow = require('./shallow.js')

function subtokenize(events) {
  var jumps = {}
  var index = -1
  var event
  var lineIndex
  var otherIndex
  var otherEvent
  var parameters
  var subevents
  var more

  while (++index < events.length) {
    while (index in jumps) {
      index = jumps[index]
    }

    event = events[index]

    // Add a hook for the GFM tasklist extension, which needs to know if text
    // is in the first content of a list item.
    if (
      index &&
      event[1].type === 'chunkFlow' &&
      events[index - 1][1].type === 'listItemPrefix'
    ) {
      subevents = event[1]._tokenizer.events
      otherIndex = 0

      if (
        otherIndex < subevents.length &&
        subevents[otherIndex][1].type === 'lineEndingBlank'
      ) {
        otherIndex += 2
      }

      if (
        otherIndex < subevents.length &&
        subevents[otherIndex][1].type === 'content'
      ) {
        while (++otherIndex < subevents.length) {
          if (subevents[otherIndex][1].type === 'content') {
            break
          }

          if (subevents[otherIndex][1].type === 'chunkText') {
            subevents[otherIndex][1].isInFirstContentOfListItem = true
            otherIndex++
          }
        }
      }
    }

    // Enter.
    if (event[0] === 'enter') {
      if (event[1].contentType) {
        assign(jumps, subcontent(events, index))
        index = jumps[index]
        more = true
      }
    }
    // Exit.
    else if (event[1]._container || event[1]._movePreviousLineEndings) {
      otherIndex = index
      lineIndex = undefined

      while (otherIndex--) {
        otherEvent = events[otherIndex]

        if (
          otherEvent[1].type === 'lineEnding' ||
          otherEvent[1].type === 'lineEndingBlank'
        ) {
          if (otherEvent[0] === 'enter') {
            if (lineIndex) {
              events[lineIndex][1].type = 'lineEndingBlank'
            }

            otherEvent[1].type = 'lineEnding'
            lineIndex = otherIndex
          }
        } else {
          break
        }
      }

      if (lineIndex) {
        // Fix position.
        event[1].end = shallow(events[lineIndex][1].start)

        // Switch container exit w/ line endings.
        parameters = events.slice(lineIndex, index)
        parameters.unshift(event)
        chunkedSplice(events, lineIndex, index - lineIndex + 1, parameters)
      }
    }
  }

  return !more
}

function subcontent(events, eventIndex) {
  var token = events[eventIndex][1]
  var context = events[eventIndex][2]
  var startPosition = eventIndex - 1
  var startPositions = []
  var tokenizer =
    token._tokenizer || context.parser[token.contentType](token.start)
  var childEvents = tokenizer.events
  var jumps = []
  var gaps = {}
  var stream
  var previous
  var index
  var entered
  var end
  var adjust

  // Loop forward through the linked tokens to pass them in order to the
  // subtokenizer.
  while (token) {
    // Find the position of the event for this token.
    while (events[++startPosition][1] !== token) {
      // Empty.
    }

    startPositions.push(startPosition)

    if (!token._tokenizer) {
      stream = context.sliceStream(token)

      if (!token.next) {
        stream.push(null)
      }

      if (previous) {
        tokenizer.defineSkip(token.start)
      }

      if (token.isInFirstContentOfListItem) {
        tokenizer._gfmTasklistFirstContentOfListItem = true
      }

      tokenizer.write(stream)

      if (token.isInFirstContentOfListItem) {
        tokenizer._gfmTasklistFirstContentOfListItem = undefined
      }
    }

    // Unravel the next token.
    previous = token
    token = token.next
  }

  // Now, loop back through all events (and linked tokens), to figure out which
  // parts belong where.
  token = previous
  index = childEvents.length

  while (index--) {
    // Make sure we’ve at least seen something (final eol is part of the last
    // token).
    if (childEvents[index][0] === 'enter') {
      entered = true
    } else if (
      // Find a void token that includes a break.
      entered &&
      childEvents[index][1].type === childEvents[index - 1][1].type &&
      childEvents[index][1].start.line !== childEvents[index][1].end.line
    ) {
      add(childEvents.slice(index + 1, end))

      // Help GC.
      token._tokenizer = token.next = undefined
      token = token.previous
      end = index + 1
    }
  }

  // Help GC.
  tokenizer.events = token._tokenizer = token.next = undefined

  // Do head:
  add(childEvents.slice(0, end))

  index = -1
  adjust = 0

  while (++index < jumps.length) {
    gaps[adjust + jumps[index][0]] = adjust + jumps[index][1]
    adjust += jumps[index][1] - jumps[index][0] - 1
  }

  return gaps

  function add(slice) {
    var start = startPositions.pop()
    jumps.unshift([start, start + slice.length - 1])
    chunkedSplice(events, start, 2, slice)
  }
}

module.exports = subtokenize
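
For context, a minimal sketch of how this helper is typically driven: subtokenize returns true only when a pass over the events found no more embedded content (no enter event carrying a contentType), so the caller loops until it reports completion. The file name and require path below are illustrative, not part of this file.

'use strict'

var subtokenize = require('./subtokenize.js')

// Expand embedded content (chunkFlow, chunkText, …) in place until
// subtokenize reports that nothing is left to expand.
function postprocess(events) {
  while (!subtokenize(events)) {
    // Empty.
  }

  return events
}

module.exports = postprocess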