Date: 2026-04-02 PR: https://github.com/fetch-rewards/labs-skills/pull/16 Skills: summarizing-specs, gathering-coding-context
| Skill | Pass Rate | Baseline | Lift | Token Delta |
|---|---|---|---|---|
Date: 2026-04-02 PR: https://github.com/fetch-rewards/labs-skills/pull/16 Skills: summarizing-specs, gathering-coding-context
| Skill | Pass Rate | Baseline | Lift | Token Delta |
|---|---|---|---|---|
A self-contained benchmark comparing two LLM tool-calling patterns across queries of varying complexity (1 tool through 8+ tools).
Both use GPT-4.1 with the same 20 tools. The benchmark measures tokens, latency, and LLM round-trips.
def check_constraints(constraints, image_dim, window_axes):
    """Disable window movements that would step outside the image bounds.

    Parameters
    ----------
    constraints : tuple(bool, bool, bool, bool)
        Current (move_left, move_right, move_up, move_down) flags.
    image_dim : tuple(int, int)
        (x, y) extents of the image.
    window_axes : tuple(int, int, int, int)
        (x1, x2, y1, y2) edges of the sliding window.

    Returns
    -------
    tuple(bool, bool, bool, bool)
        The movement flags with any out-of-bounds direction forced to False.
    """
    move_left, move_right, move_up, move_down = constraints
    x_max, y_max = image_dim
    x1, x2, y1, y2 = window_axes
    if x1 <= 0:
        move_left = False   # window already touches the left edge
    if x2 >= x_max:
        move_right = False  # window already touches the right edge
    if y1 <= 0:
        move_up = False     # window already touches the top edge
    if y2 >= y_max:
        move_down = False   # window already touches the bottom edge
    # Bug fix: the original computed the updated flags but never returned them,
    # so callers could not observe the result.
    return move_left, move_right, move_up, move_down
# Detect faces and crop the image around the most confident detection.
detector = MTCNN()  # face detection model
image = plt.imread(input_file_path)
faces = detector.detect_faces(image)
# Pick the detection with the highest confidence score.
scores = [face['confidence'] for face in faces]
best = np.argmax(scores)
x1, y1, box_w, box_h = faces[best]['box']
# Centre of the chosen bounding box.
center = (x1 + box_w // 2, y1 + box_h // 2)
image = crop(image, center, aspect_ratio)
# Crop around a point offset 290 px below the image centre, then save.
image = plt.imread(file_path)
height, width = image.shape[:-1]  # drop the channel axis
crop_center = (height // 2 + 290, width // 2)
image = crop(image, center=crop_center, aspect_ratio=(1.4 / 2.0))
plt.imsave('output.jpg', image)
"""
Minimal character-level Vanilla RNN model. Written by Andrej Karpathy (@karpathy)
BSD License
"""
import numpy as np

# data I/O
# Bug fix: the original called open() without ever closing the file handle;
# a `with` block guarantees the file is closed even on error.
with open('input.txt', 'r') as f:  # should be simple plain text file
    data = f.read()
chars = list(set(data))  # unique characters form the vocabulary
data_size, vocab_size = len(data), len(chars)
| def get_filter_output(input_img_path, filter):  # convolve image at input_img_path with a square 2-D kernel; NOTE: `filter` shadows the builtin | |
| img = plt.imread(input_img_path)  # load image; may be HxW (grayscale) or HxWxC (color) | |
| filter_size = filter.shape[0]  # kernel assumed square; odd size implied by the padding below | |
| pad_width = (filter_size-1)//2  # zero-padding that keeps the output the same spatial size | |
| if len(img.shape)==3:  # color image: replicate the kernel across the 3 channels | |
| filter_3D = np.stack((filter, filter, filter), axis=2) | |
| temp = np.pad(img, ((pad_width, pad_width), (pad_width, pad_width), (0, 0)), mode='constant', constant_values=0)  # pad H and W only, not channels | |
| temp = np.array([[np.sum(np.multiply(temp[i:i+filter_size, j:j+filter_size, :], filter_3D)) for j in range(temp.shape[1]-filter_size+1)] for i in range(temp.shape[0]-filter_size+1)])  # NOTE(review): np.sum here reduces over channels too, yielding a 2-D result for a 3-D input — confirm this is intended | |
| else:  # grayscale image | |
| temp = np.pad(img, ((pad_width, pad_width), (pad_width, pad_width)), mode='constant', constant_values=0)  # 2-D convolution loop presumably follows beyond this chunk |