// tokens.test.ts — unit tests for the token-accounting helpers in ../tokens
import { mock, describe, expect, test } from "bun:test";
// Mock heavy dependency chain: tokenEstimation.ts → log.ts → bootstrap/state.ts
// NOTE(review): these mock.module() calls must run BEFORE the dynamic import of
// "../tokens" below so bun substitutes the stubs during module resolution.
mock.module("src/utils/log.ts", () => ({
  // All logging entry points become no-ops; lookups return inert values.
  logError: () => {},
  logToFile: () => {},
  getLogDisplayTitle: () => "",
  logEvent: () => {},
  logMCPError: () => {},
  logMCPDebug: () => {},
  dateToFilename: (d: Date) => d.toISOString().replace(/[:.]/g, "-"),
  getLogFilePath: () => "/tmp/mock-log",
  attachErrorLogSink: () => {},
  getInMemoryErrors: () => [],
  loadErrorLogs: async () => [],
  getErrorLogByIndex: async () => null,
  captureAPIRequest: () => {},
  _resetErrorLogForTesting: () => {},
}));
// Mock tokenEstimation to avoid pulling in API provider deps
mock.module("src/services/tokenEstimation.ts", () => ({
  // Cheap deterministic stand-ins: ~4 chars per token, flat 100 per message,
  // and API-backed counters resolve to 0.
  roughTokenCountEstimation: (text: string) => Math.ceil(text.length / 4),
  roughTokenCountEstimationForMessages: (msgs: any[]) => msgs.length * 100,
  roughTokenCountEstimationForMessage: () => 100,
  roughTokenCountEstimationForFileType: () => 100,
  bytesPerTokenForFileType: () => 4,
  countTokensWithAPI: async () => 0,
  countMessagesTokensWithAPI: async () => 0,
  countTokensViaHaikuFallback: async () => 0,
}));
// Mock slowOperations to avoid bun:bundle import
mock.module("src/utils/slowOperations.ts", () => ({
  // Pass-throughs backed by the platform equivalents; timing hooks disabled.
  jsonStringify: JSON.stringify,
  jsonParse: JSON.parse,
  slowLogging: { enabled: false },
  clone: (v: any) => structuredClone(v),
  cloneDeep: (v: any) => structuredClone(v),
  callerFrame: () => "",
  SLOW_OPERATION_THRESHOLD_MS: 100,
  writeFileSync_DEPRECATED: () => {},
}));
// Import the unit under test AFTER the mocks are registered (top-level await),
// so the functions below are bound against the stubbed dependency graph.
const {
  getTokenCountFromUsage,
  getTokenUsage,
  tokenCountFromLastAPIResponse,
  messageTokenCountFromLastAPIResponse,
  getCurrentUsage,
  doesMostRecentAssistantMessageExceed200k,
  getAssistantMessageContentLength,
} = await import("../tokens");
  50. // ─── Helpers ────────────────────────────────────────────────────────────
  51. function makeAssistantMessage(
  52. content: any[],
  53. usage?: any,
  54. model?: string,
  55. id?: string
  56. ) {
  57. return {
  58. type: "assistant" as const,
  59. uuid: `test-${Math.random()}`,
  60. message: {
  61. id: id ?? `msg_${Math.random()}`,
  62. role: "assistant" as const,
  63. content,
  64. model: model ?? "claude-sonnet-4-20250514",
  65. usage: usage ?? {
  66. input_tokens: 100,
  67. output_tokens: 50,
  68. cache_creation_input_tokens: 10,
  69. cache_read_input_tokens: 5,
  70. },
  71. },
  72. isApiErrorMessage: false,
  73. };
  74. }
  75. function makeUserMessage(text: string) {
  76. return {
  77. type: "user" as const,
  78. uuid: `test-${Math.random()}`,
  79. message: { role: "user" as const, content: text },
  80. };
  81. }
  82. // ─── getTokenCountFromUsage ─────────────────────────────────────────────
  83. describe("getTokenCountFromUsage", () => {
  84. test("sums all token fields", () => {
  85. const usage = {
  86. input_tokens: 100,
  87. output_tokens: 50,
  88. cache_creation_input_tokens: 20,
  89. cache_read_input_tokens: 10,
  90. };
  91. expect(getTokenCountFromUsage(usage)).toBe(180);
  92. });
  93. test("handles missing cache fields", () => {
  94. const usage = {
  95. input_tokens: 100,
  96. output_tokens: 50,
  97. };
  98. expect(getTokenCountFromUsage(usage)).toBe(150);
  99. });
  100. test("handles zero values", () => {
  101. const usage = {
  102. input_tokens: 0,
  103. output_tokens: 0,
  104. cache_creation_input_tokens: 0,
  105. cache_read_input_tokens: 0,
  106. };
  107. expect(getTokenCountFromUsage(usage)).toBe(0);
  108. });
  109. });
  110. // ─── getTokenUsage ──────────────────────────────────────────────────────
  111. describe("getTokenUsage", () => {
  112. test("returns usage for valid assistant message", () => {
  113. const msg = makeAssistantMessage([{ type: "text", text: "hello" }]);
  114. const usage = getTokenUsage(msg as any);
  115. expect(usage).toBeDefined();
  116. expect(usage!.input_tokens).toBe(100);
  117. });
  118. test("returns undefined for user message", () => {
  119. const msg = makeUserMessage("hello");
  120. expect(getTokenUsage(msg as any)).toBeUndefined();
  121. });
  122. test("returns undefined for synthetic model", () => {
  123. const msg = makeAssistantMessage(
  124. [{ type: "text", text: "hello" }],
  125. { input_tokens: 10, output_tokens: 5 },
  126. "<synthetic>"
  127. );
  128. expect(getTokenUsage(msg as any)).toBeUndefined();
  129. });
  130. });
  131. // ─── tokenCountFromLastAPIResponse ──────────────────────────────────────
  132. describe("tokenCountFromLastAPIResponse", () => {
  133. test("returns token count from last assistant message", () => {
  134. const msgs = [
  135. makeAssistantMessage([{ type: "text", text: "hi" }], {
  136. input_tokens: 200,
  137. output_tokens: 100,
  138. cache_creation_input_tokens: 50,
  139. cache_read_input_tokens: 25,
  140. }),
  141. ];
  142. expect(tokenCountFromLastAPIResponse(msgs as any)).toBe(375);
  143. });
  144. test("returns 0 for empty messages", () => {
  145. expect(tokenCountFromLastAPIResponse([])).toBe(0);
  146. });
  147. test("skips user messages to find last assistant", () => {
  148. const msgs = [
  149. makeAssistantMessage([{ type: "text", text: "hi" }], {
  150. input_tokens: 100,
  151. output_tokens: 50,
  152. }),
  153. makeUserMessage("reply"),
  154. ];
  155. expect(tokenCountFromLastAPIResponse(msgs as any)).toBe(150);
  156. });
  157. });
  158. // ─── messageTokenCountFromLastAPIResponse ───────────────────────────────
  159. describe("messageTokenCountFromLastAPIResponse", () => {
  160. test("returns output_tokens from last assistant", () => {
  161. const msgs = [
  162. makeAssistantMessage([{ type: "text", text: "hi" }], {
  163. input_tokens: 200,
  164. output_tokens: 75,
  165. }),
  166. ];
  167. expect(messageTokenCountFromLastAPIResponse(msgs as any)).toBe(75);
  168. });
  169. test("returns 0 for empty messages", () => {
  170. expect(messageTokenCountFromLastAPIResponse([])).toBe(0);
  171. });
  172. });
  173. // ─── getCurrentUsage ────────────────────────────────────────────────────
  174. describe("getCurrentUsage", () => {
  175. test("returns usage object from last assistant", () => {
  176. const msgs = [
  177. makeAssistantMessage([{ type: "text", text: "hi" }], {
  178. input_tokens: 100,
  179. output_tokens: 50,
  180. cache_creation_input_tokens: 10,
  181. cache_read_input_tokens: 5,
  182. }),
  183. ];
  184. const usage = getCurrentUsage(msgs as any);
  185. expect(usage).toEqual({
  186. input_tokens: 100,
  187. output_tokens: 50,
  188. cache_creation_input_tokens: 10,
  189. cache_read_input_tokens: 5,
  190. });
  191. });
  192. test("returns null for empty messages", () => {
  193. expect(getCurrentUsage([])).toBeNull();
  194. });
  195. test("defaults cache fields to 0", () => {
  196. const msgs = [
  197. makeAssistantMessage([{ type: "text", text: "hi" }], {
  198. input_tokens: 100,
  199. output_tokens: 50,
  200. }),
  201. ];
  202. const usage = getCurrentUsage(msgs as any);
  203. expect(usage!.cache_creation_input_tokens).toBe(0);
  204. expect(usage!.cache_read_input_tokens).toBe(0);
  205. });
  206. });
  207. // ─── doesMostRecentAssistantMessageExceed200k ───────────────────────────
  208. describe("doesMostRecentAssistantMessageExceed200k", () => {
  209. test("returns false when under 200k", () => {
  210. const msgs = [
  211. makeAssistantMessage([{ type: "text", text: "hi" }], {
  212. input_tokens: 1000,
  213. output_tokens: 500,
  214. }),
  215. ];
  216. expect(doesMostRecentAssistantMessageExceed200k(msgs as any)).toBe(false);
  217. });
  218. test("returns true when over 200k", () => {
  219. const msgs = [
  220. makeAssistantMessage([{ type: "text", text: "hi" }], {
  221. input_tokens: 190000,
  222. output_tokens: 15000,
  223. }),
  224. ];
  225. expect(doesMostRecentAssistantMessageExceed200k(msgs as any)).toBe(true);
  226. });
  227. test("returns false for empty messages", () => {
  228. expect(doesMostRecentAssistantMessageExceed200k([])).toBe(false);
  229. });
  230. });
  231. // ─── getAssistantMessageContentLength ───────────────────────────────────
  232. describe("getAssistantMessageContentLength", () => {
  233. test("counts text content length", () => {
  234. const msg = makeAssistantMessage([{ type: "text", text: "hello" }]);
  235. expect(getAssistantMessageContentLength(msg as any)).toBe(5);
  236. });
  237. test("counts multiple blocks", () => {
  238. const msg = makeAssistantMessage([
  239. { type: "text", text: "hello" },
  240. { type: "text", text: "world" },
  241. ]);
  242. expect(getAssistantMessageContentLength(msg as any)).toBe(10);
  243. });
  244. test("counts thinking content", () => {
  245. const msg = makeAssistantMessage([
  246. { type: "thinking", thinking: "let me think" },
  247. ]);
  248. expect(getAssistantMessageContentLength(msg as any)).toBe(12);
  249. });
  250. test("returns 0 for empty content", () => {
  251. const msg = makeAssistantMessage([]);
  252. expect(getAssistantMessageContentLength(msg as any)).toBe(0);
  253. });
  254. test("counts tool_use input", () => {
  255. const msg = makeAssistantMessage([
  256. { type: "tool_use", id: "t1", name: "Bash", input: { command: "ls" } },
  257. ]);
  258. expect(getAssistantMessageContentLength(msg as any)).toBeGreaterThan(0);
  259. });
  260. });