  1. // Copyright 2022 The Go Authors. All rights reserved.
  2. // Use of this source code is governed by a BSD-style
  3. // license that can be found in the LICENSE file.
  4. package diff
  5. import (
  6. "bytes"
  7. "fmt"
  8. "sort"
  9. "strings"
  10. )
// A pair is a pair of values tracked for both the x and y side of a diff.
// It is typically a pair of line indexes.
type pair struct{ x, y int }
// Diff returns an anchored diff of the two texts old and new
// in the “unified diff” format. If old and new are identical,
// Diff returns a nil slice (no output).
//
// Unix diff implementations typically look for a diff with
// the smallest number of lines inserted and removed,
// which can in the worst case take time quadratic in the
// number of lines in the texts. As a result, many implementations
// either can be made to run for a long time or cut off the search
// after a predetermined amount of work.
//
// In contrast, this implementation looks for a diff with the
// smallest number of “unique” lines inserted and removed,
// where unique means a line that appears just once in both old and new.
// We call this an “anchored diff” because the unique lines anchor
// the chosen matching regions. An anchored diff is usually clearer
// than a standard diff, because the algorithm does not try to
// reuse unrelated blank lines or closing braces.
// The algorithm also guarantees to run in O(n log n) time
// instead of the standard O(n²) time.
//
// Some systems call this approach a “patience diff,” named for
// the “patience sorting” algorithm, itself named for a solitaire card game.
// We avoid that name for two reasons. First, the name has been used
// for a few different variants of the algorithm, so it is imprecise.
// Second, the name is frequently interpreted as meaning that you have
// to wait longer (to be patient) for the diff, meaning that it is a slower algorithm,
// when in fact the algorithm is faster than the standard one.
func Diff(oldName string, old []byte, newName string, new []byte) []byte {
	if bytes.Equal(old, new) {
		return nil
	}
	x := lines(old)
	y := lines(new)

	// Print diff header.
	var out bytes.Buffer
	fmt.Fprintf(&out, "diff %s %s\n", oldName, newName)
	fmt.Fprintf(&out, "--- %s\n", oldName)
	fmt.Fprintf(&out, "+++ %s\n", newName)

	// Loop over matches to consider,
	// expanding each match to include surrounding lines,
	// and then printing diff chunks.
	// To avoid setup/teardown cases outside the loop,
	// tgs returns a leading {0,0} and trailing {len(x), len(y)} pair
	// in the sequence of matches.
	var (
		done  pair     // printed up to x[:done.x] and y[:done.y]
		chunk pair     // start lines of current chunk
		count pair     // number of lines from each side in current chunk
		ctext []string // lines for current chunk
	)
	for _, m := range tgs(x, y) {
		if m.x < done.x {
			// Already handled scanning forward from earlier match.
			continue
		}

		// Expand matching lines as far as possible,
		// establishing that x[start.x:end.x] == y[start.y:end.y].
		// Note that on the first (or last) iteration we may (or definitely do)
		// have an empty match: start.x==end.x and start.y==end.y.
		start := m
		for start.x > done.x && start.y > done.y && x[start.x-1] == y[start.y-1] {
			start.x--
			start.y--
		}
		end := m
		for end.x < len(x) && end.y < len(y) && x[end.x] == y[end.y] {
			end.x++
			end.y++
		}

		// Emit the mismatched lines before start into this chunk.
		// (No effect on first sentinel iteration, when start = {0,0}.)
		for _, s := range x[done.x:start.x] {
			ctext = append(ctext, "-"+s)
			count.x++
		}
		for _, s := range y[done.y:start.y] {
			ctext = append(ctext, "+"+s)
			count.y++
		}

		// If we're not at EOF and have too few common lines,
		// the chunk includes all the common lines and continues.
		const C = 3 // number of context lines
		if (end.x < len(x) || end.y < len(y)) &&
			(end.x-start.x < C || (len(ctext) > 0 && end.x-start.x < 2*C)) {
			for _, s := range x[start.x:end.x] {
				ctext = append(ctext, " "+s)
				count.x++
				count.y++
			}
			done = end
			continue
		}

		// End chunk with common lines for context.
		if len(ctext) > 0 {
			// At most C trailing context lines.
			n := end.x - start.x
			if n > C {
				n = C
			}
			for _, s := range x[start.x : start.x+n] {
				ctext = append(ctext, " "+s)
				count.x++
				count.y++
			}
			done = pair{start.x + n, start.y + n}

			// Format and emit chunk: "@@ -oldStart,oldCount +newStart,newCount @@".
			// Convert line numbers to 1-indexed.
			// Special case: empty file shows up as 0,0 not 1,0.
			if count.x > 0 {
				chunk.x++
			}
			if count.y > 0 {
				chunk.y++
			}
			fmt.Fprintf(&out, "@@ -%d,%d +%d,%d @@\n", chunk.x, count.x, chunk.y, count.y)
			for _, s := range ctext {
				out.WriteString(s)
			}
			count.x = 0
			count.y = 0
			ctext = ctext[:0]
		}

		// If we reached EOF, we're done.
		if end.x >= len(x) && end.y >= len(y) {
			break
		}

		// Otherwise start a new chunk, beginning with up to C leading
		// context lines. (end.x-C >= done.x here, because the
		// "too few common lines" test above failed, so the slice is valid.)
		chunk = pair{end.x - C, end.y - C}
		for _, s := range x[chunk.x:end.x] {
			ctext = append(ctext, " "+s)
			count.x++
			count.y++
		}
		done = end
	}

	return out.Bytes()
}
  151. // lines returns the lines in the file x, including newlines.
  152. // If the file does not end in a newline, one is supplied
  153. // along with a warning about the missing newline.
  154. func lines(x []byte) []string {
  155. l := strings.SplitAfter(string(x), "\n")
  156. if l[len(l)-1] == "" {
  157. l = l[:len(l)-1]
  158. } else {
  159. // Treat last line as having a message about the missing newline attached,
  160. // using the same text as BSD/GNU diff (including the leading backslash).
  161. l[len(l)-1] += "\n\\ No newline at end of file\n"
  162. }
  163. return l
  164. }
// tgs returns the pairs of indexes of the longest common subsequence
// of unique lines in x and y, where a unique line is one that appears
// once in x and once in y.
//
// The longest common subsequence algorithm is as described in
// Thomas G. Szymanski, “A Special Case of the Maximal Common
// Subsequence Problem,” Princeton TR #170 (January 1975),
// available at https://research.swtch.com/tgs170.pdf.
func tgs(x, y []string) []pair {
	// Count the number of times each string appears in x and y.
	// We only care about 0, 1, many, counted as 0, -1, -2
	// for the x side and 0, -4, -8 for the y side.
	// Using negative numbers now lets us distinguish positive line numbers later.
	m := make(map[string]int)
	for _, s := range x {
		if c := m[s]; c > -2 {
			m[s] = c - 1
		}
	}
	for _, s := range y {
		if c := m[s]; c > -8 {
			m[s] = c - 4
		}
	}

	// Now unique strings can be identified by m[s] = -1+-4.
	//
	// Gather the indexes of those strings in x and y, building:
	// xi[i] = increasing indexes of unique strings in x.
	// yi[i] = increasing indexes of unique strings in y.
	// inv[i] = index j such that x[xi[i]] = y[yi[j]].
	var xi, yi, inv []int
	for i, s := range y {
		if m[s] == -1+-4 {
			// Overwrite the count with the (non-negative) position in yi,
			// so the x pass below can recover it.
			m[s] = len(yi)
			yi = append(yi, i)
		}
	}
	for i, s := range x {
		// j >= 0 means s was recorded as unique in the y pass above.
		if j, ok := m[s]; ok && j >= 0 {
			xi = append(xi, i)
			inv = append(inv, j)
		}
	}

	// Apply Algorithm A from Szymanski's paper.
	// In those terms, A = J = inv and B = [0, n).
	// We add sentinel pairs {0,0}, and {len(x),len(y)}
	// to the returned sequence, to help the processing loop.
	//
	// This is a patience-sorting-style longest increasing subsequence of J:
	// T[k] holds the smallest value of J seen so far that ends an increasing
	// subsequence of length k+1 (n+1 acts as “infinity”), and L[i] records
	// the length of the longest increasing subsequence of J ending at i.
	J := inv
	n := len(xi)
	T := make([]int, n)
	L := make([]int, n)
	for i := range T {
		T[i] = n + 1
	}
	for i := 0; i < n; i++ {
		// T is sorted, so binary search finds the subsequence J[i] extends.
		k := sort.Search(n, func(k int) bool {
			return T[k] >= J[i]
		})
		T[k] = J[i]
		L[i] = k + 1
	}
	// k = length of the longest increasing subsequence overall.
	k := 0
	for _, v := range L {
		if k < v {
			k = v
		}
	}
	// Reconstruct the subsequence by scanning right to left,
	// taking the rightmost element recorded at each length level.
	seq := make([]pair, 2+k)
	seq[1+k] = pair{len(x), len(y)} // sentinel at end
	lastj := n
	for i := n - 1; i >= 0; i-- {
		if L[i] == k && J[i] < lastj {
			seq[k] = pair{xi[i], yi[J[i]]}
			k--
		}
	}
	seq[0] = pair{0, 0} // sentinel at start
	return seq
}