bnew

originally posted here: Mods I know yall are “busy” but Twitter ➡️ “X” embeds gotta start working

[replace X.com with twitter.com]


Used Bing Chat to create a userscript that replaces any occurrence of https://www.x.com or https://x.com with https://twitter.com.
It'll only replace the text in the input field on thecoli when BBCode mode is enabled.

[screenshot: 0WhpwKl.gif]

I pasted a link with Ctrl+V and, as you can see, it automatically changed to https://twitter.com/PBS/status/1642240482813042688.
If you have an x.com link in the HTML editor and switch to the BBCode editor, the URL won't change immediately; you'll have to press space, type something, or hit Enter inside the field for it to change. There's a small, annoying cursor-position issue I couldn't get rid of without causing other bugs.

edit:
Toggle the "BB code" button and then the buttons to the left should turn grey.
[screenshot: 1gNRnIE.png]

Code:
// ==UserScript==
// @name         Replace X.com URLs with Twitter.com URLs
// @namespace    http://tampermonkey.net/
// @description  Replaces URLs containing "https://www.x.com" or "https://x.com" with "https://twitter.com".
// @author       Author Name
// @version      1.0
// @match        https://www.thecoli.com/threads/*
// @match        https://www.thecoli.com/forums/*/post-thread
// @match        https://thecoli.com/threads/*
// @match        https://xenforo.com/community/forums/*
// @grant        none
// ==/UserScript==

(function() {
    'use strict';

    // Function to get the current cursor position in a text input field
    function getCursorPosition(input) {
        let position = 0;
        if ('selectionStart' in input) {
            position = input.selectionStart;
        } else if (document.selection) {
            input.focus();
            let selection = document.selection.createRange();
            selection.moveStart('character', -input.value.length);
            position = selection.text.length;
        }
        return position;
    }

    // Function to set the cursor position in a text input field
    function setCursorPosition(input, position) {
        if (input.setSelectionRange) {
            input.focus();
            input.setSelectionRange(position, position);
        } else if (input.createTextRange) {
            let range = input.createTextRange();
            range.collapse(true);
            range.moveEnd('character', position);
            range.moveStart('character', position);
            range.select();
        }
    }

    // Function to replace URLs in text input fields
    function replaceURLs(event) {
        // Get all editable fields on the page (as an array so .includes() works)
        let editableFields = Array.from(document.querySelectorAll('[contenteditable="true"], .fr-element.fr-view.fr-element-scroll-visible'));

        // Get the target element of the event
        let target = event.target;

        // Check if the target is a text input field or an editable field
        if (target.matches('textarea.input') || editableFields.includes(target)) {
            // Save the current cursor position
            let cursorPosition = getCursorPosition(target);

            // Replace URLs in the value or innerHTML of the target element
            let oldText = target.value !== undefined ? target.value : target.innerHTML;
            let newText = oldText.replace(/https:\/\/(www\.)?x\.com/g, 'https://twitter.com');
            if (target.value !== undefined) {
                target.value = newText;
            } else {
                target.innerHTML = newText;
            }

            // Restore the cursor position, adjusted for the change in text length
            setCursorPosition(target, Math.max(0, cursorPosition + (newText.length - oldText.length)));
        }
    }

    // Function to prevent cursor movement by arrow keys (kept disabled)
    // function preventCursorMovement(event) {
    //     // Check if the key pressed is left or right arrow
    //     if (event.keyCode == 37 || event.keyCode == 39) {
    //         // Prevent the default behavior of the key
    //         event.preventDefault();
    //     }
    // }

    // Add event listener for input events on the document
    document.addEventListener('input', replaceURLs);
    // document.addEventListener('keydown', preventCursorMovement);
})();





AI-generated instructions:
Here are the step-by-step instructions to add the userscript to Chrome or Firefox:
For Chrome:

  1. Install the Tampermonkey extension from the Chrome Web Store.
  2. Once Tampermonkey is installed, click on the Tampermonkey icon in the toolbar and select “Create a new script…”
  3. Paste your userscript into the editor and save it.
  4. The userscript should now be installed and will run whenever you visit a page that matches its @match or @include rules.
For Firefox:

  1. Install the Greasemonkey or Tampermonkey extension from the Firefox Add-ons site.
  2. Once Greasemonkey or Tampermonkey is installed, click on the Greasemonkey or Tampermonkey icon in the toolbar and select “Add a new script…”
  3. Paste your userscript into the editor and save it.
  4. The userscript should now be installed and will run whenever you visit a page that matches its @match or @include rules.
You can also use the userscript on Android with Firefox or Kiwi Browser.


Since Twitter officially changed its links to x.com, I updated the userscript to work in HTML editor mode as well as BBCode editor mode. Now, any time you paste an https://x.com or https://www.x.com URL, it'll automatically replace the domain with https://twitter.com, making the embeds work seamlessly since the site add-ons haven't been updated to support x.com.


Code:
// ==UserScript==
// @name         Replace X.com URLs with Twitter.com URLs
// @namespace    http://tampermonkey.net/
// @description  Replaces URLs containing "https://x.com" or "https://www.x.com" with "https://twitter.com".
// @author       Author Name
// @version      1.3
// @match        https://www.thecoli.com/threads/*
// @match        https://thecoli.com/threads/*
// @match        https://xenforo.com/community/forums/*
// @grant        none
// ==/UserScript==

(function() {
    'use strict';

    // Function to get the current cursor position in a text input field
    function getCursorPosition(input) {
        let position = 0;
        if ('selectionStart' in input) {
            position = input.selectionStart;
        } else if (document.selection) {
            input.focus();
            let selection = document.selection.createRange();
            selection.moveStart('character', -input.value.length);
            position = selection.text.length;
        }
        return position;
    }

    // Function to set the cursor position in a text input field
    function setCursorPosition(input, position) {
        if (input.setSelectionRange) {
            input.focus();
            input.setSelectionRange(position, position);
        } else if (input.createTextRange) {
            let range = input.createTextRange();
            range.collapse(true);
            range.moveEnd('character', position);
            range.moveStart('character', position);
            range.select();
        }
    }

    // Function to replace URLs in text input fields and editable areas
    function replaceURLs(event) {
        // Get all editable fields on the page
        let editableFields = document.querySelectorAll('[contenteditable="true"], .fr-element.fr-view.fr-element-scroll-visible, textarea.input');

        editableFields.forEach(function(field) {
            // Save the current cursor position
            let cursorPosition = getCursorPosition(field);

            // Replace URLs in the value or innerHTML of the field
            let value = field.value !== undefined ? field.value : field.innerHTML;
            let newValue = value.replace(/https:\/\/(www\.)?x\.com/g, 'https://twitter.com');

            if (value !== newValue) {
                if (field.tagName.toLowerCase() === 'textarea' || field.tagName.toLowerCase() === 'input') {
                    field.value = newValue;
                } else {
                    field.innerHTML = newValue;
                }

                // Restore the cursor position, adjusted for the change in text length
                setCursorPosition(field, Math.max(0, cursorPosition + (newValue.length - value.length)));
            }

            // If the user pressed Enter, re-run the replacement on the line the cursor is on
            if (event.type === 'keydown' && event.key === 'Enter') {
                // Save the current cursor position and work out which line it falls on
                cursorPosition = getCursorPosition(field);
                let text = field.value !== undefined ? field.value : field.innerHTML;
                let lines = text.split('\n');
                let lineIndex = text.substring(0, cursorPosition).split('\n').length - 1;

                // Replace URLs in that line only
                lines[lineIndex] = lines[lineIndex].replace(/https:\/\/(www\.)?x\.com/g, 'https://twitter.com');
                let joined = lines.join('\n');
                if (field.tagName.toLowerCase() === 'textarea' || field.tagName.toLowerCase() === 'input') {
                    field.value = joined;
                } else {
                    field.innerHTML = joined;
                }

                // Restore the cursor position, adjusted for the change in text length
                setCursorPosition(field, Math.max(0, cursorPosition + (joined.length - text.length)));
            }
        });
    }

    // Add event listeners for input and keydown events on the document
    document.addEventListener('input', replaceURLs);
    document.addEventListener('keydown', replaceURLs);
})();

Changelog:

**Version 1.0 to Version 1.3**

1. **Expanded Functionality**:
- The script now handles both text input fields and editable areas (with the `contenteditable="true"` attribute) on the web pages.
- The script now replaces URLs in the BBCode editor mode, in addition to the HTML editor mode.

2. **Improved Cursor Position Handling**:
- The script now correctly restores the cursor position after replacing URLs, even when the text length changes.
- The script now handles the case where the user presses the Enter key, replacing URLs in the new line and restoring the cursor position correctly.

3. **Expanded Matching Patterns**:
- The script now matches URLs with both the "x.com" and "www.x.com" patterns.

4. **Improved Readability and Maintainability**:
- The script has been reorganized and formatted for better readability and maintainability.
- The script now uses more descriptive variable and function names.

5. **Increased Robustness**:
- The script now handles edge cases more gracefully, ensuring a smoother user experience.

Overall, the changes from Version 1.0 to Version 1.3 have significantly improved the functionality, reliability, and user experience of the script.
 

O³ (O cubed)

Can somebody show me how to post a pic?

It seems so arduous to find a third party hosting site, upload a pic then trying to embed it.
 

bnew

Can somebody show me how to post a pic?

It seems so arduous to find a third party hosting site, upload a pic then trying to embed it.

Use https://imgur.com for SFW images and https://ibb.co (Upload Image — Free Image Hosting) for NSFW images.

When you go to either site, if the image is in your clipboard, just press Ctrl+V on the homepage and it'll upload the image from your clipboard. If you need to select an image from a folder on your hard drive, click "Upload" on ibb.co or "New post" on imgur.

When the image is uploaded, right-click on it and select "Copy Image Address". Come back to The Coli and press "Insert image" in the HTML editor menu.
[screenshot: DorwMhP.png]


Paste the image link inside and click "Insert".

You can also manually type the BBCode [IMG]https://i.imgur.com/DorwMhP.png[/IMG], that is, [IMG] and [/IMG] around the image URL.
 

bnew

Used the llama-3-sonar-large-32k-chat LLM to create a userscript that can automatically convert x.com tweet links to Twitter embeds; it'll also automatically convert x.com URLs that you post to twitter.com, to save time from having to embed the link correctly.

userscript:




bookmarklet:

wasn't able to host the code on the coli for some reason.. :francis:

**Layman's Summary:**

This userscript is a tool that helps convert links from `x.com` to Twitter embeds on specific websites. It does two main things:

1. **Converts links**: When you're browsing certain websites, the script automatically converts any links from `x.com` that contain "/status/" into a Twitter embed, which allows you to view the tweet directly on the page.
2. **Replaces URLs**: The script also replaces any `x.com` URLs in text input fields and editable areas with `Twitter.com` URLs, making it easier to share Twitter links.
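
Since the actual script couldn't be attached, here's a rough sketch (not the posted code) of how those two behaviours could be wired up. The embed step assumes Twitter's public widgets.js, and the selectors are illustrative only.

Code:
(function () {
    'use strict';

    // 1. Turn rendered x.com status links into embedded tweets via Twitter's widget.
    document.querySelectorAll('a[href*="/status/"]').forEach(function (a) {
        if (!/^https:\/\/(www\.)?x\.com\//.test(a.href)) return;
        var tweetUrl = a.href.replace(/^https:\/\/(www\.)?x\.com/, 'https://twitter.com');
        var quote = document.createElement('blockquote');
        quote.className = 'twitter-tweet';
        quote.innerHTML = '<a href="' + tweetUrl + '"></a>';
        a.replaceWith(quote);
    });
    var widgets = document.createElement('script');
    widgets.src = 'https://platform.twitter.com/widgets.js';
    document.body.appendChild(widgets);

    // 2. Rewrite x.com URLs typed or pasted into editable fields.
    document.addEventListener('input', function (e) {
        var t = e.target;
        if (!t.matches('textarea, [contenteditable="true"]')) return;
        var text = t.value !== undefined ? t.value : t.innerHTML;
        var fixed = text.replace(/https:\/\/(www\.)?x\.com/g, 'https://twitter.com');
        if (fixed !== text) {
            if (t.value !== undefined) t.value = fixed; else t.innerHTML = fixed;
        }
    });
})();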
 

bnew







This userscript makes it easier to share and view social media posts on a specific online forum. Here's what it does:

* When someone shares a link to a Mastodon, Twitter, or X.com post, the script replaces the link with the actual post content, so you can see the post directly on the forum without having to click on the link.
* It makes the posts look nice and fit well on the forum page.
* It also adds a few extra features, like allowing you to view Twitter posts on a different website called Twstalker.
* When you're typing a message on the forum, the script will automatically replace any Twitter links you type with the correct format, so you don't have to worry about formatting them correctly.
* If someone shares a link to an image on Twitter (Twimg), the script will replace the link with a direct link to the image, so you can see the image right away.
* If someone shares a link to a Reddit preview image, the script will replace the link with a direct link to the image on Reddit, so you can see the full image.

Overall, it makes it easier to share and engage with social media content on the forum, and makes the experience more convenient and visually appealing.
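
For illustration only, here's a tiny sketch of the image-link rewriting described above; the exact hostnames and URL shapes the real script handles may differ.

Code:
function directImageUrl(href) {
    var u = new URL(href);
    if (u.hostname === 'pbs.twimg.com') {
        u.search = '?format=jpg&name=orig';        // ask for the original-size image
        return u.href;
    }
    if (u.hostname === 'preview.redd.it') {
        return 'https://i.redd.it' + u.pathname;   // preview links map to the full image
    }
    return href;
}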

[screenshot: DwgDWUJ.png]
 

bnew

Got a bookmarklet to extract tweets from Nitter (nitter.poast.org), the Twitter mirror that doesn't require an account to see replies or a timeline. I converted my original bookmarklet to work for the site.

*AI Generated:

Here's a summary of what the code does, explained in simple terms:

**Step 1: Find Tweets**

* The code looks at the webpage and finds all the tweets on it.
* It counts how many tweets it found and stops if it didn't find any.

**Step 2: Get Tweet Links**

* The code goes through each tweet and finds the link to the tweet.
* It changes the link to point to Twitter instead of the current website.
* It adds the link to a list of tweet links.

**Step 3: Get Tweet Text**

* The code goes through each tweet link and finds the text of the tweet.
* It fixes any links in the tweet text to make them look nice.
* It adds the tweet text to a list of tweet texts.

**Step 4: Get Image Links**

* The code goes through each tweet and finds any images in the tweet.
* It changes the image links to point to Twitter instead of the current website.
* It adds the image links to a list of image links.

**Step 5: Make a Nice Text**

* The code takes all the tweet texts and image links and makes a nice, formatted text.
* It adds some extra information to the text, like the link to the tweet thread.

**Step 6: Make a Copy Button**

* The code creates a button on the webpage that says "Copy Tweets".
* When you click the button, it does some magic to copy the nice text to your clipboard.

**Step 7: Copy Text**

* When you click the button, the code copies the nice text to your clipboard.
* It shows a little message to let you know that it worked.
* It then removes the button and the message from the webpage.

That's it
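
For reference, here's a minimal sketch of that flow. It is not the original bookmarklet; Nitter's usual class names (.timeline-item, .tweet-link, .tweet-content, .still-image) are assumptions, and to use something like this as a bookmarklet it would be minified onto one javascript: line.

Code:
(function () {
    // Step 1: find tweets
    var items = Array.prototype.slice.call(document.querySelectorAll('.timeline-item'));
    if (!items.length) { alert('No tweets found'); return; }

    var links = [], texts = [], images = [];

    items.forEach(function (item) {
        // Step 2: tweet link, pointed back at Twitter
        var link = item.querySelector('a.tweet-link');
        if (link) links.push('https://twitter.com' + link.getAttribute('href'));

        // Step 3: tweet text
        var content = item.querySelector('.tweet-content');
        if (content) texts.push(content.innerText.trim());

        // Step 4: image links, rewritten to pbs.twimg.com
        item.querySelectorAll('.still-image').forEach(function (el) {
            var src = el.getAttribute('href') || el.getAttribute('src') || '';
            var path = decodeURIComponent(new URL(src, location.origin).pathname);
            images.push('https://pbs.twimg.com' + path.replace(/^\/pic/, ''));
        });
    });

    // Step 5: build one formatted block of text
    var output = texts.map(function (t, i) { return (i + 1) + '/' + texts.length + '\n' + t; }).join('\n\n')
        + '\n\n' + links.join('\n') + '\n\n' + images.join('\n');

    // Steps 6-7: a "Copy Tweets" button; copying from its click handler keeps
    // document.execCommand('copy') inside a user-generated event
    var btn = document.createElement('button');
    btn.textContent = 'Copy Tweets';
    btn.style.cssText = 'position:fixed;top:10px;right:10px;z-index:9999';
    btn.addEventListener('click', function () {
        var ta = document.createElement('textarea');
        ta.value = output;
        document.body.appendChild(ta);
        ta.select();
        document.execCommand('copy');
        ta.remove();
        btn.textContent = 'Copied!';
        setTimeout(function () { btn.remove(); }, 1500);
    });
    document.body.appendChild(btn);
})();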


Used Claude Sonnet 3.5 to adapt it.
Works on https://nitter.poast.org, but it can work on other Nitter instances.
Fixed an issue I had trouble figuring out: the copy-to-clipboard function wasn't working, so as a workaround the old code had a button you had to press to copy to clipboard. Now that's not necessary.
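
As a hedged sketch of that change (not the actual updated code): clicking the bookmarklet itself counts as the user gesture, so the text can go straight to the clipboard without an extra button. Here "output" stands for the formatted string built in the sketch above.

Code:
function copyTweets(output) {
    navigator.clipboard.writeText(output).then(function () {
        var note = document.createElement('div');
        note.textContent = 'Tweets copied to clipboard';
        note.style.cssText = 'position:fixed;top:10px;right:10px;padding:6px 10px;background:#222;color:#fff;z-index:9999';
        document.body.appendChild(note);
        setTimeout(function () { note.remove(); }, 1500);
    }, function () {
        alert('Clipboard write failed');
    });
}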

Updated code:

Code modified with Llama-3-sonar-large-32k-chat:

The main difference between the original bookmarklet and the updated bookmarklet is in the way they handle the tweet text formatting. The updated bookmarklet includes additional code to preserve newlines in the tweet text by replacing `<br>` tags with `\n` and replacing `&nbsp;` with regular spaces. This ensures that the tweet text is formatted correctly when copied to the clipboard.

Here is the specific code that was added to the updated bookmarklet:

Code:
tweetText = tweetText.replace(/<br[^>]*>/g, '\n');
tweetText = tweetText.replace(/&nbsp;/g, ' ');

These lines of code replace `<br>` tags with newlines (`\n`) and replace non-breaking spaces (`&nbsp;`) with regular spaces. This helps to maintain the original line breaks and spacing in the tweet text when it is copied to the clipboard.


#### Changes

- **Improved Tweet Text Formatting**: The bookmarklet now preserves newlines in tweet text by replacing `<br>` tags with `\n` and replacing `&nbsp;` with regular spaces.
- **Enhanced Image URL Collection**: The bookmarklet now collects image URLs more efficiently by using `flatMap` and `map` functions.
- **Refactored Code**: The code has been refactored for better readability and maintainability.

The updated bookmarklet now formats tweet text more accurately by preserving line breaks and spacing. It also collects image URLs more efficiently, making it easier to copy and share tweet threads. The code has been improved for better performance and readability.



**When using `document.execCommand()` in a bookmarklet, use encoded single quotes around the command string.**

Instead of: `document.execCommand('copy');`
Use: `document.execCommand(%27copy%27);`

This will help you avoid the "was denied because it was not called from inside a short running user-generated event handler" error in Firefox.
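
For illustration, a hypothetical one-line bookmarklet with the single quotes percent-encoded (the copy payload here is just a placeholder, not the real extraction code):

Code:
javascript:(function(){var ta=document.createElement(%27textarea%27);ta.value=%27hello from a bookmarklet%27;document.body.appendChild(ta);ta.select();document.execCommand(%27copy%27);ta.remove();})();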


 

bnew

amended the bookmarklet to extract tweets from Nitter (nitter.poast.org) so that it would only get text replies from the original author and not from all users like the old code did.

used llama-3.1-70b-instruct:



Changelog Summary
  • Added a check for the tweet author in the forEach loops to only process tweets from the original user.
  • Added a tweetAuthor variable to store the author of each tweet.
  • Modified the forEach loops to filter out tweets from other users.
  • Modified the flatMap function to filter out images from other users.
Note: The diff only shows the changes made to the original code. The rest of the code remains the same.
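
A rough sketch of that author filter, assuming Nitter's .timeline-item and .username markup and treating the first tweet's author as the thread owner (not the exact code from the bookmarklet):

Code:
var items = Array.prototype.slice.call(document.querySelectorAll('.timeline-item'));
var firstUser = items.length ? items[0].querySelector('.username') : null;
var mainAuthor = firstUser ? firstUser.textContent.trim() : null;

items.forEach(function (item) {
    var userEl = item.querySelector('.username');
    var tweetAuthor = userEl ? userEl.textContent.trim() : null;
    if (!mainAuthor || tweetAuthor !== mainAuthor) return;  // skip replies from other users
    // ...collect this tweet's link, text, and images as before...
});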

updated 8/18/2024


Fixed a bug where some tweets didn't get extracted.

**Changelog**

* Initial code:
+ Collected tweets from a Twitter thread on Nitter
+ Extracted tweet text and images
+ Formatted text and images into a single string
+ Copied string to clipboard
* Changes:
+ Added null checks for `username` and `tweetContent` elements to prevent errors
+ Modified code to access `href` attribute of `tweet-link` element correctly
+ Used `URL` API to construct full URL from relative URL
+ Added check to ensure `tweetAuthor` element is not null before accessing its `textContent` property
+ Modified code to correctly extract URLs for tweets and include them in "thread continued" spoiler
* Final code:
+ Collected tweets from a Twitter thread on Nitter
+ Extracted tweet text and images
+ Formatted text and images into a single string
+ Copied string to clipboard
+ Correctly extracted URLs for tweets and included them in "thread continued" spoiler

**Layman's Summary**

This code is a bookmarklet that helps you collect tweets from a Twitter thread on Nitter, a Twitter proxy website. It extracts the text and images from each tweet, formats them into a single string, and copies the string to your clipboard.

Initially, the code had some issues that caused errors, but we fixed them by adding some checks to make sure the code doesn't crash. We also modified the code to correctly extract the URLs for the tweets, so you can easily share them with others.

Now, the code works smoothly and correctly extracts the tweets, text, and images, and even includes the URLs for the tweets in the "thread continued" spoiler. This makes it easy to share the tweets with others, and it's a big time-saver if you need to collect tweets from a long thread.
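
As a hedged illustration of the null checks and URL handling mentioned in the changelog (the selectors are assumptions, not the actual code):

Code:
document.querySelectorAll('.timeline-item').forEach(function (item) {
    var linkEl = item.querySelector('a.tweet-link');
    var contentEl = item.querySelector('.tweet-content');
    if (!linkEl || !contentEl) return;                    // skip items missing either piece

    var href = linkEl.getAttribute('href');               // e.g. "/user/status/123#m"
    var fullUrl = new URL(href, location.origin).href;    // build an absolute URL from the relative one
    var text = contentEl.innerText.trim();
    // ...collect fullUrl and text as before...
});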
 

bnew

OK, I updated the code from post #39 (which extracted tweets only from the user) to now work on any Nitter instance: xcancel.com, nitter.poast.org, or whatever.
It'll replace the domains with twitter.com and the image URLs with pbs.twimg.com. I also fixed a bug that prevented it from extracting the URL from a user's first reply to their own tweet.

AI / LLM used to modify: llama-3.1-sonar-large-128k-chat



Changelog​

Version 1.0: Initial Release
  • Feature: Extracts tweets from a webpage and formats them for easy copying.
  • Feature: Includes URLs of subsequent tweets in the "thread continued" spoiler.
  • Feature: Replaces domain of URLs with twitter.com for consistency.
  • Feature: Correctly formats image URLs to use pbs.twimg.com/media/.
  • Feature: Removes query parameters from image URLs.
  • Feature: Ensures only one /media/ path in image URLs.
  • Feature: Includes all relevant URLs from tweets by the same author in the "thread continued" spoiler.
  • Feature: Formats tweet texts to remove HTML tags and replace links with ${link}.
Version 1.1: Tweaks and Improvements
  • Improvement: Updated logic to handle dynamic domains correctly.
  • Improvement: Ensured that the first URL is included in the "thread continued" spoiler.
  • Improvement: Corrected formatting of tweet texts to match desired output.

Layman's Summary​

What the Code Does:This script helps you collect and format tweets from a webpage so you can easily copy and share them. Here's a simple breakdown:
  1. Extracts Tweets:
    • The script finds all tweets on the page and extracts their text content.
  2. Formats Tweets:
    • It formats each tweet text to remove any HTML tags and replace links with a readable format.
    • It numbers each tweet (e.g., "1/2", "2/2") for clarity.
  3. Includes URLs:
    • If there are multiple tweets in a thread, it includes the URLs of all these tweets in a "thread continued" section.
    • Ensures that the first tweet's URL is also included in this section.
  4. Corrects Image URLs:
    • It corrects image URLs to use the proper Twitter image domain (pbs.twimg.com/media/) and removes any unnecessary query parameters.
  5. Copies to Clipboard:
    • The formatted text, including all tweets, URLs, and images, is copied to your clipboard for easy sharing.
  6. Notification:
    • After copying, a notification appears briefly to confirm that the content has been copied successfully.
This script simplifies the process of collecting and sharing tweets while maintaining their original context and formatting.
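
To illustrate the URL normalization described above, a small sketch (the Nitter /pic/ path shape is an assumption, not the actual code):

Code:
function toTwitterUrl(href) {
    var u = new URL(href, location.origin);
    u.hostname = 'twitter.com';                    // same rewrite for any Nitter instance
    return u.href;
}

function toTwimgUrl(src) {
    var path = decodeURIComponent(new URL(src, location.origin).pathname);
    path = path.replace(/^\/pic\//, '')            // drop Nitter's proxy prefix
               .split('?')[0]                      // drop query parameters
               .replace(/(media\/)+/, 'media/');   // keep a single media/ segment
    return 'https://pbs.twimg.com/' + path;
}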
 

bnew

Made a modification to the Nitter bookmarklet to also extract/include tweet URLs from users in the comments too.



Modified Script to Include All Tweet Text and URLs​

example output:
Code:
.
https://twitter.com/ArtificialAnlys/status/1832806801743774199
[SPOILER="thread continued"]
https://twitter.com/ArtificialAnlys/status/1832806806244171857#m
https://twitter.com/AGItechgonewild/status/1832817566416621652#m
https://twitter.com/literallydenis/status/1832821123836244470#m
https://twitter.com/abacaj/status/1832816808690114642#m
https://twitter.com/teortaxesTex/status/1832839214183993427#m
https://twitter.com/skizofck/status/1832811085000306916#m
https://twitter.com/ks458008/status/1832895508731593085#m
https://twitter.com/shawnchauhan1/status/1832896278570938445#m
https://twitter.com/koenvaneijk/status/1832888947065880804#m
https://twitter.com/WIRExHEAD/status/1832870904231039244#m
https://twitter.com/pigeon__s/status/1832808227525767325#m
https://twitter.com/atlantis__labs/status/1832832852259418334#m
https://twitter.com/natolambert/status/1832873062024528176#m
https://twitter.com/_arohan_/status/1832813561485418715#m
https://twitter.com/LaurenceBrem/status/1832808220106039353#m
https://twitter.com/ArifkhanMan/status/1832809986914910410#m
https://twitter.com/natsothanaphan/status/1832816950566936977#m
https://twitter.com/kadenbilyeu0/status/1832819043788870044#m
https://twitter.com/LiteBlackEagle/status/1832844531198550155#m
https://twitter.com/Bakaburg1/status/1832883138613452991#m
[/SPOILER]
[SPOILER="full text"]

1/21
We have now partially replicated Reflection Llama 3.1 70B’s evaluation claims, but we would caution that these results are not entirely apples-to-apples comparable with other models

Since our first testing of the public release version of Reflection Llama 3.1 70B, @mattshumer_ has shared access to a privately hosted version of the model that does not suffer from the issues with the version publicly released on Hugging Face. Per the charts below, the evaluation scores we’re seeing on this version are impressive - they show performance above the level of Llama 3.1 405B for GPQA and MATH. MMLU is in-line with Meta’s release of Llama 3.1 70B, indicating performance improvements may not be consistent in all contexts.

The chart below is based on our standard methodology and system prompt. When using Reflection’s default system prompt and extracting answers only from within Reflection’s &lt;output&gt; tags, results show substantial improvement: MMLU: 87% (in-line with Llama 405B), GPQA: 54%, Math: 73%.

The model seems to be achieving these results through forcing an output ‘reflection’ response where the model always generates scaffolding of &lt;thinking&gt;, &lt;reflection&gt;, and &lt;output&gt;. In doing this it generates more tokens than other models do on our eval suite with our standard ‘think step by step’ prompting. For GPQA, Reflection 70B generates consistently more output tokens that other models (see below for detailed comparison).

While the benchmark results are impressive, they should not be considered apples-to-apples with traditional instruct-tuned models. The results may be less applicable to generalized non-benchmark measured intelligence for the following reasons:
‣ Reflection scaffolding is an example of test-time compute scaling - using more inference compute to get to an answer. It introduces a new kind of compute and latency trade-off to be considered alongside model size and non-reflection intelligence. Compared to a model of the same size, Reflection 70B appears to use more compute and take longer to get to get to an answer.
‣ This approach to achieving reflection via fine-tuning restricts the flexibility of the model and may make it unsuitable for many use-cases. Compared to achieving chain-of-thought techniques via prompting, fine-tuning in the reflection approach means the form of reasoning cannot be changed. For example, it appears that Reflection 70B is not capable of ‘just responding with the answer’ in response to an instruction to classify something and only respond with a one word category. It may also be limited in the types of reasoning approaches it can pursue (non-reflection oriented).

Ultimately, Reflection 70B appears to demonstrate the potential of fine-tuning with standardized response scaffolding alongside test-time compute scaling. Given the impressive results, further research should be conducted on the advantages and drawbacks of this approach, including the degree to which it generalizes beyond evaluations to real-world use-cases.

All that being said: if applying reflection fine-tuning drives a similar jump in eval performance on Llama 3.1 405B, we expect Reflection 405B to achieve near SOTA results across the board.

Notes on the results:
‣ These were tested on a private API version and not an open-weights version.
‣ We cannot yet independently confirm that these results are not the result of benchmark contamination.
‣ Tests for Reflection were run with 6000 max output tokens (as opposed to our standard 2048 max output tokens). We have not yet studied the effect of a lower max output token setting on Reflection.

2/21
Reflection 70B is not the only model that has trended toward using more tokens at inference time. We see a wide spread in how verbose models are as they walk through chain of thought reasoning in our evaluations.

We compared the average number of characters in each response to the questions in the GPQA dataset with our standard prompting. Reflection 70B generates more characters for each GPQA response than any other models we have tested but the total volume is less than 2x an average across other recent models.

Given that total inference compute is proportional the product of total tokens and parameter count, this means that Reflection 70B uses substantially less total compute to achieve its GPQA score than Llama 3.1 405B.

3/21
Maybe the best way forward for these Benchmarks would be that all models are allowed to use the same amount of compute budget as the highest model.

“Given that total inference compute is proportional the product of total tokens and parameter count, this means that Reflection 70B uses substantially less total compute to achieve its GPQA score than Llama 3.1 405B.”

Then smaller models could do all kinds of tricks as long as they were within budget.

Don’t underestimate an army of smaller models ✨

4/21
I think it’s also unfair to perform benchmarking with a custom system prompt (that forces the model to perform COT-like reasoning), while other models are tested as is, without advanced prompting techniques

BTW is there a simple way to benchmark these system prompts?

5/21
still waiting for the correct weights to try the model locally, not sure why it has to remain behind an api (hard to tell what is being served then)

6/21
&gt; has shared access to a privately hosted version of the model

Have you verified that it's 70B?

7/21
reddit guys gonna be real mad

8/21
Reflection Llama 3.1 70B shows promise, but caution is warranted. Testing on private APIs and using custom prompts complicates fair comparisons. Potential benchmark contamination and lack of open verification are concerns. While improvements in efficiency, flexibility, and real-world applicability are intriguing, reproduction with public weights is crucial. The potential for better AI explainability and human-AI collaboration is exciting, but claims need careful scrutiny. As fair, detailed verification progresses, we'll better understand this approach's true value. For now, these impressive results require a balanced, skeptical perspective.

9/21
Still waiting for the right weights to test the model locally. It's frustrating that it's only available through an API—it makes it hard to see what's actually being used.

10/21
this benchmark is useless. the weights are not publicly released. you can't verify anything like this. waste of energy and time. @mattshumer_ no offense and a lot of respect, but this looks bad, uploading to HF is ezpz.

11/21
&gt;These were tested on a private API version and not an open-weights version.
&gt;We cannot yet independently confirm that these results are not the result of benchmark contamination.

12/21
that doesn't really seem fair I mean I'm sure if you asked all the models when benchmarking them to do all the fancy prompting techniques they would probably do better than plain outputs too why not do the same thing for Claude it something and see how much better it does on it

13/21
to skip intermediate gen, I prompted w &lt;output&gt; tag and results are still decent wasn't total garbage

14/21
Scaling laws changing again to take into account different types of inference :) [U][URL]https://www.interconnects.ai/p/openai-strawberry-and-inference-scaling-laws[/URL][/U]

15/21
Few ideas to test thats quite easy: You could reorder the MMLU answers to check.

Another is remove partial question (drop half tokens) and check accuracy as well.

16/21
Thank you for the thorough analysis and discussion. Will you also test the 405B model when it becomes available?

17/21
MMLU 87% or 84%??

18/21
Thank you for the independent verification work.

Given that some of the skepticism is that this is mostly due to prompt engineering baked into the model, do you also perhaps have the comparisons where all models use Reflection’s system prompt? In particular, the comparison with Llama 3.1 70B would be useful.

19/21
Thank you.

I still want the proper weights

20/21
To me, it's still just a reminder, too difficult to grasp. My skill level is low, so I'll find joy elsewhere. My advice is: it might be worth trying when you don't fully understand the process, but don't spend too much time on it. You can have these things and more simply by asking AI to create reminders.

21/21
Does the test use the same scaffolded system prompt with the other models too? I think it’s important to separate the impact of the prompt from the impact of the fine tune from the impact of both factors together. A crossed experiment plus a regression analysis can achive this.


[COLOR=rgb(184, 49, 47)][B][SIZE=5]To post tweets in this format, more info here: [URL]https://www.thecoli.com/threads/tips-and-tricks-for-posting-the-coli-megathread.984734/post-52211196[/URL][/SIZE][/B][/COLOR]
[/SPOILER]
[SPOILER="larger images"]
[img]https://pbs.twimg.com/media/GW9rPLqbsAA3uwv.jpg[/img]
[img]https://pbs.twimg.com/media/GW9r48VaoAAsytO.jpg[/img]
[img]https://pbs.twimg.com/media/GW953HUW8AAU0R_.jpg[/img]
[img]https://pbs.twimg.com/media/GW9z61HWQAAgMQF.jpg[/img]
[/SPOILER]
 

bnew

Here's another Nitter bookmarklet version of #41 that does the same thing but includes the @Username above each tweet's text.




Modified Script to Include @Username Above Each Tweet Text​

example output:
Code:
https://twitter.com/ArtificialAnlys/status/1832806801743774199
[SPOILER="thread continued"]
https://twitter.com/ArtificialAnlys/status/1832806806244171857#m
https://twitter.com/AGItechgonewild/status/1832817566416621652#m
https://twitter.com/literallydenis/status/1832821123836244470#m
https://twitter.com/abacaj/status/1832816808690114642#m
https://twitter.com/teortaxesTex/status/1832839214183993427#m
https://twitter.com/skizofck/status/1832811085000306916#m
https://twitter.com/ks458008/status/1832895508731593085#m
https://twitter.com/shawnchauhan1/status/1832896278570938445#m
https://twitter.com/koenvaneijk/status/1832888947065880804#m
https://twitter.com/WIRExHEAD/status/1832870904231039244#m
https://twitter.com/natolambert/status/1832873062024528176#m
https://twitter.com/pigeon__s/status/1832808227525767325#m
https://twitter.com/atlantis__labs/status/1832832852259418334#m
https://twitter.com/_arohan_/status/1832813561485418715#m
https://twitter.com/LaurenceBrem/status/1832808220106039353#m
https://twitter.com/ArifkhanMan/status/1832809986914910410#m
https://twitter.com/natsothanaphan/status/1832816950566936977#m
https://twitter.com/kadenbilyeu0/status/1832819043788870044#m
https://twitter.com/LiteBlackEagle/status/1832844531198550155#m
https://twitter.com/squizzster/status/1832817077075239376#m
https://twitter.com/indexclone1980/status/1832905376418664869#m
https://twitter.com/Bakaburg1/status/1832883138613452991#m
https://twitter.com/eztati/status/1832847080026783802#m
https://twitter.com/donelianc/status/1832837473690350008#m
https://twitter.com/florianleuerer/status/1832826174063399088#m
https://twitter.com/1iSku/status/1832829777599984043#m
https://twitter.com/tim_tyler/status/1832879327463035336#m
https://twitter.com/BRussellsimp/status/1832807818388267051#m
https://twitter.com/TonyPiassa/status/1832826400090296589#m
https://twitter.com/ZarekVoss/status/1832810318407389213#m
[/SPOILER]
[SPOILER="full text"]

1/31
@ArtificialAnlys
We have now partially replicated Reflection Llama 3.1 70B’s evaluation claims, but we would caution that these results are not entirely apples-to-apples comparable with other models

Since our first testing of the public release version of Reflection Llama 3.1 70B, @mattshumer_ has shared access to a privately hosted version of the model that does not suffer from the issues with the version publicly released on Hugging Face. Per the charts below, the evaluation scores we’re seeing on this version are impressive - they show performance above the level of Llama 3.1 405B for GPQA and MATH. MMLU is in-line with Meta’s release of Llama 3.1 70B, indicating performance improvements may not be consistent in all contexts.

The chart below is based on our standard methodology and system prompt. When using Reflection’s default system prompt and extracting answers only from within Reflection’s &lt;output&gt; tags, results show substantial improvement: MMLU: 87% (in-line with Llama 405B), GPQA: 54%, Math: 73%.

The model seems to be achieving these results through forcing an output ‘reflection’ response where the model always generates scaffolding of &lt;thinking&gt;, &lt;reflection&gt;, and &lt;output&gt;. In doing this it generates more tokens than other models do on our eval suite with our standard ‘think step by step’ prompting. For GPQA, Reflection 70B generates consistently more output tokens that other models (see below for detailed comparison).

While the benchmark results are impressive, they should not be considered apples-to-apples with traditional instruct-tuned models. The results may be less applicable to generalized non-benchmark measured intelligence for the following reasons:
‣ Reflection scaffolding is an example of test-time compute scaling - using more inference compute to get to an answer. It introduces a new kind of compute and latency trade-off to be considered alongside model size and non-reflection intelligence. Compared to a model of the same size, Reflection 70B appears to use more compute and take longer to get to get to an answer.
‣ This approach to achieving reflection via fine-tuning restricts the flexibility of the model and may make it unsuitable for many use-cases. Compared to achieving chain-of-thought techniques via prompting, fine-tuning in the reflection approach means the form of reasoning cannot be changed. For example, it appears that Reflection 70B is not capable of ‘just responding with the answer’ in response to an instruction to classify something and only respond with a one word category. It may also be limited in the types of reasoning approaches it can pursue (non-reflection oriented).

Ultimately, Reflection 70B appears to demonstrate the potential of fine-tuning with standardized response scaffolding alongside test-time compute scaling. Given the impressive results, further research should be conducted on the advantages and drawbacks of this approach, including the degree to which it generalizes beyond evaluations to real-world use-cases.

All that being said: if applying reflection fine-tuning drives a similar jump in eval performance on Llama 3.1 405B, we expect Reflection 405B to achieve near SOTA results across the board.

Notes on the results:
‣ These were tested on a private API version and not an open-weights version.
‣ We cannot yet independently confirm that these results are not the result of benchmark contamination.
‣ Tests for Reflection were run with 6000 max output tokens (as opposed to our standard 2048 max output tokens). We have not yet studied the effect of a lower max output token setting on Reflection.

2/31
@ArtificialAnlys
Reflection 70B is not the only model that has trended toward using more tokens at inference time. We see a wide spread in how verbose models are as they walk through chain of thought reasoning in our evaluations.

We compared the average number of characters in each response to the questions in the GPQA dataset with our standard prompting. Reflection 70B generates more characters for each GPQA response than any other models we have tested but the total volume is less than 2x an average across other recent models.

Given that total inference compute is proportional the product of total tokens and parameter count, this means that Reflection 70B uses substantially less total compute to achieve its GPQA score than Llama 3.1 405B.

3/31
@AGItechgonewild
Maybe the best way forward for these Benchmarks would be that all models are allowed to use the same amount of compute budget as the highest model.

“Given that total inference compute is proportional the product of total tokens and parameter count, this means that Reflection 70B uses substantially less total compute to achieve its GPQA score than Llama 3.1 405B.”

Then smaller models could do all kinds of tricks as long as they were within budget.

Don’t underestimate an army of smaller models ✨

4/31
@literallydenis
I think it’s also unfair to perform benchmarking with a custom system prompt (that forces the model to perform COT-like reasoning), while other models are tested as is, without advanced prompting techniques

BTW is there a simple way to benchmark these system prompts?

5/31
@abacaj
still waiting for the correct weights to try the model locally, not sure why it has to remain behind an api (hard to tell what is being served then)

6/31
@teortaxesTex
&gt; has shared access to a privately hosted version of the model

Have you verified that it's 70B?

7/31
@skizofck
reddit guys gonna be real mad

8/31
@ks458008
Reflection Llama 3.1 70B shows promise, but caution is warranted. Testing on private APIs and using custom prompts complicates fair comparisons. Potential benchmark contamination and lack of open verification are concerns. While improvements in efficiency, flexibility, and real-world applicability are intriguing, reproduction with public weights is crucial. The potential for better AI explainability and human-AI collaboration is exciting, but claims need careful scrutiny. As fair, detailed verification progresses, we'll better understand this approach's true value. For now, these impressive results require a balanced, skeptical perspective.

9/31
@shawnchauhan1
Still waiting for the right weights to test the model locally. It's frustrating that it's only available through an API—it makes it hard to see what's actually being used.

10/31
@koenvaneijk
this benchmark is useless. the weights are not publicly released. you can't verify anything like this. waste of energy and time. @mattshumer_ no offense and a lot of respect, but this looks bad, uploading to HF is ezpz.

11/31
@WIRExHEAD
&gt;These were tested on a private API version and not an open-weights version.
&gt;We cannot yet independently confirm that these results are not the result of benchmark contamination.

12/31
@natolambert
Scaling laws changing again to take into account different types of inference :) [U][URL]https://www.interconnects.ai/p/openai-strawberry-and-inference-scaling-laws[/URL][/U]

13/31
@pigeon__s
that doesn't really seem fair I mean I'm sure if you asked all the models when benchmarking them to do all the fancy prompting techniques they would probably do better than plain outputs too why not do the same thing for Claude it something and see how much better it does on it

14/31
@atlantis__labs
to skip intermediate gen, I prompted w &lt;output&gt; tag and results are still decent wasn't total garbage

15/31
@_arohan_
Few ideas to test thats quite easy: You could reorder the MMLU answers to check.

Another is remove partial question (drop half tokens) and check accuracy as well.

16/31
@LaurenceBrem
Thank you for the thorough analysis and discussion. Will you also test the 405B model when it becomes available?

17/31
@ArifkhanMan
MMLU 87% or 84%??

18/31
@natsothanaphan
Thank you for the independent verification work.

Given that some of the skepticism is that this is mostly due to prompt engineering baked into the model, do you also perhaps have the comparisons where all models use Reflection’s system prompt? In particular, the comparison with Llama 3.1 70B would be useful.

19/31
@kadenbilyeu0
Thank you.

I still want the proper weights

20/31
@LiteBlackEagle
To me, it's still just a reminder, too difficult to grasp. My skill level is low, so I'll find joy elsewhere. My advice is: it might be worth trying when you don't fully understand the process, but don't spend too much time on it. You can have these things and more simply by asking AI to create reminders.

21/31
@squizzster
Congratulations on the fine-tuning! 
This technique is kinda forcing the model to adhere to a structured verbose response thereby generating improved answers.

This is kinda 😎. My question ❓ is would a STD Llama 3.1 using a verbose prompt guide the model to the same output?

22/31
@indexclone1980
Please make sure that what you tested (internally) isn't just a proxy version of Claude 3.5 Sonnet since the OR ver linking to Matt's internal seems like it from the way it responds. Or else this entire test needs to be taken down due to being fake.

23/31
@Bakaburg1
Does the test use the same scaffolded system prompt with the other models too? I think it’s important to separate the impact of the prompt from the impact of the fine tune from the impact of both factors together. A crossed experiment plus a regression analysis can achive this.

24/31
@eztati
"may make it unsuitable for many use-cases" , what are those cases? You could just hide the reflection process to the final user an get the "just respond with the answer" effect .

25/31
@donelianc
great post! huge thanks to the team for sharing these insights.

26/31
@florianleuerer
How do you run the MMLU benchmarks? The default implementation is logprob and not „generation“. I guess you run it as a generation benchmark and prompt the models to genrate the letter of the correct answer?

27/31
@1iSku
shouldnt it compare with fewshot version of those closeai model?

28/31
@tim_tyler
IMO, you should compare with the approach described here: [U][URL]https://matchingpennies.com/llm_plus_plus/[/URL][/U]
It uses reflection on expanded previous output - but with simple prompting and no retraining required.

29/31
@BRussellsimp
Holy water! @mattshumer_  where to find this?

30/31
@TonyPiassa
Working on a classifier of database records, tables, columns with complex reflection and thinking processes, but I want the output to be strictly table and column and one word classification. It is possible to get it to do that with the correct prompting.

31/31
@ZarekVoss
&gt;These were tested on a private API version and not an open-weights version.
hmmm.... will wait for open weights version's evaluation and results.


[COLOR=rgb(184, 49, 47)][B][SIZE=5]To post tweets in this format, more info here: [URL]https://www.thecoli.com/threads/tips-and-tricks-for-posting-the-coli-megathread.984734/post-52211196[/URL][/SIZE][/B][/COLOR]
[/SPOILER]
[SPOILER="larger images"]
[img]https://pbs.twimg.com/media/GW9rPLqbsAA3uwv.jpg[/img]
[img]https://pbs.twimg.com/media/GW9r48VaoAAsytO.jpg[/img]
[img]https://pbs.twimg.com/media/GW953HUW8AAU0R_.jpg[/img]
[img]https://pbs.twimg.com/media/GW9z61HWQAAgMQF.jpg[/img]
[/SPOILER]
 

bnew

This is the same Nitter bookmarklet version as #42, but it excludes tweet URLs that don't belong to the original tweet author from the "thread continued" spoiler, while keeping the @Username in the "full text" spoiler so you can follow along with the replies.





example output:
Code:
https://twitter.com/ArtificialAnlys/status/1832806801743774199
[SPOILER="thread continued"]
https://twitter.com/ArtificialAnlys/status/1832806806244171857#m
[/SPOILER]
[SPOILER="full text"]

1/11
@ArtificialAnlys
We have now partially replicated Reflection Llama 3.1 70B’s evaluation claims, but we would caution that these results are not entirely apples-to-apples comparable with other models

Since our first testing of the public release version of Reflection Llama 3.1 70B, @mattshumer_ has shared access to a privately hosted version of the model that does not suffer from the issues with the version publicly released on Hugging Face. Per the charts below, the evaluation scores we’re seeing on this version are impressive - they show performance above the level of Llama 3.1 405B for GPQA and MATH. MMLU is in-line with Meta’s release of Llama 3.1 70B, indicating performance improvements may not be consistent in all contexts.

The chart below is based on our standard methodology and system prompt. When using Reflection’s default system prompt and extracting answers only from within Reflection’s &lt;output&gt; tags, results show substantial improvement: MMLU: 87% (in-line with Llama 405B), GPQA: 54%, Math: 73%.

The model seems to be achieving these results through forcing an output ‘reflection’ response where the model always generates scaffolding of &lt;thinking&gt;, &lt;reflection&gt;, and &lt;output&gt;. In doing this it generates more tokens than other models do on our eval suite with our standard ‘think step by step’ prompting. For GPQA, Reflection 70B generates consistently more output tokens that other models (see below for detailed comparison).

While the benchmark results are impressive, they should not be considered apples-to-apples with traditional instruct-tuned models. The results may be less applicable to generalized non-benchmark measured intelligence for the following reasons:
‣ Reflection scaffolding is an example of test-time compute scaling - using more inference compute to get to an answer. It introduces a new kind of compute and latency trade-off to be considered alongside model size and non-reflection intelligence. Compared to a model of the same size, Reflection 70B appears to use more compute and take longer to get to get to an answer.
‣ This approach to achieving reflection via fine-tuning restricts the flexibility of the model and may make it unsuitable for many use-cases. Compared to achieving chain-of-thought techniques via prompting, fine-tuning in the reflection approach means the form of reasoning cannot be changed. For example, it appears that Reflection 70B is not capable of ‘just responding with the answer’ in response to an instruction to classify something and only respond with a one word category. It may also be limited in the types of reasoning approaches it can pursue (non-reflection oriented).

Ultimately, Reflection 70B appears to demonstrate the potential of fine-tuning with standardized response scaffolding alongside test-time compute scaling. Given the impressive results, further research should be conducted on the advantages and drawbacks of this approach, including the degree to which it generalizes beyond evaluations to real-world use-cases.

All that being said: if applying reflection fine-tuning drives a similar jump in eval performance on Llama 3.1 405B, we expect Reflection 405B to achieve near SOTA results across the board.

Notes on the results:
‣ These were tested on a private API version and not an open-weights version.
‣ We cannot yet independently confirm that these results are not the result of benchmark contamination.
‣ Tests for Reflection were run with 6000 max output tokens (as opposed to our standard 2048 max output tokens). We have not yet studied the effect of a lower max output token setting on Reflection.

2/11
@ArtificialAnlys
Reflection 70B is not the only model that has trended toward using more tokens at inference time. We see a wide spread in how verbose models are as they walk through chain of thought reasoning in our evaluations.

We compared the average number of characters in each response to the questions in the GPQA dataset with our standard prompting. Reflection 70B generates more characters for each GPQA response than any other models we have tested but the total volume is less than 2x an average across other recent models.

Given that total inference compute is proportional the product of total tokens and parameter count, this means that Reflection 70B uses substantially less total compute to achieve its GPQA score than Llama 3.1 405B.

3/11
@AGItechgonewild
Maybe the best way forward for these Benchmarks would be that all models are allowed to use the same amount of compute budget as the highest model.

“Given that total inference compute is proportional the product of total tokens and parameter count, this means that Reflection 70B uses substantially less total compute to achieve its GPQA score than Llama 3.1 405B.”

Then smaller models could do all kinds of tricks as long as they were within budget.

Don’t underestimate an army of smaller models ✨

4/11
@literallydenis
I think it’s also unfair to perform benchmarking with a custom system prompt (that forces the model to perform COT-like reasoning), while other models are tested as is, without advanced prompting techniques

BTW is there a simple way to benchmark these system prompts?

5/11
@teortaxesTex
&gt; has shared access to a privately hosted version of the model

Have you verified that it's 70B?

6/11
@abacaj
still waiting for the correct weights to try the model locally, not sure why it has to remain behind an api (hard to tell what is being served then)

7/11
@skizofck
reddit guys gonna be real mad

8/11
@ks458008
Reflection Llama 3.1 70B shows promise, but caution is warranted. Testing on private APIs and using custom prompts complicates fair comparisons. Potential benchmark contamination and lack of open verification are concerns. While improvements in efficiency, flexibility, and real-world applicability are intriguing, reproduction with public weights is crucial. The potential for better AI explainability and human-AI collaboration is exciting, but claims need careful scrutiny. As fair, detailed verification progresses, we'll better understand this approach's true value. For now, these impressive results require a balanced, skeptical perspective.

9/11
@shawnchauhan1
Still waiting for the right weights to test the model locally. It's frustrating that it's only available through an API—it makes it hard to see what's actually being used.

10/11
@koenvaneijk
this benchmark is useless. the weights are not publicly released. you can't verify anything like this. waste of energy and time. @mattshumer_ no offense and a lot of respect, but this looks bad, uploading to HF is ezpz.

11/11
@WIRExHEAD
&gt;These were tested on a private API version and not an open-weights version.
&gt;We cannot yet independently confirm that these results are not the result of benchmark contamination.


[COLOR=rgb(184, 49, 47)][B][SIZE=5]To post tweets in this format, more info here: [URL]https://www.thecoli.com/threads/tips-and-tricks-for-posting-the-coli-megathread.984734/post-52211196[/URL][/SIZE][/B][/COLOR]
[/SPOILER]
[SPOILER="larger images"]
[img]https://pbs.twimg.com/media/GW9rPLqbsAA3uwv.jpg[/img]
[img]https://pbs.twimg.com/media/GW9r48VaoAAsytO.jpg[/img]
[img]https://pbs.twimg.com/media/GW953HUW8AAU0R_.jpg[/img]
[img]https://pbs.twimg.com/media/GW9z61HWQAAgMQF.jpg[/img]
[/SPOILER]
 

bnew

Another mod of the Nitter tweet-extract bookmarklet that now supports extracting quoted tweets, at least what's visible.


This Bookmarklet script helps you collect and format tweets from a Twitter thread in a specific way. Here's what it does in simple terms:
  1. Find Tweets: It looks for all tweets on the page.
  2. Identify User: It figures out whose tweets you want to collect.
  3. Collect Links and Texts: It gathers links to each tweet and their text content, including any quoted tweets.
  4. Clean Up Texts: It makes sure the text looks nice by removing unnecessary HTML tags.
  5. Get Images: It finds and formats links to larger images.
  6. Organize Everything: It puts all this information into a neat format with spoilers for easy reading.
  7. Copy to Clipboard: It copies this formatted text so you can paste it elsewhere.
  8. Notify You: It shows a quick message saying everything has been copied successfully.
This makes it easier to share or save Twitter threads with all their details intact
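
A hedged sketch of the quoted-tweet step, assuming Nitter wraps quotes in a .quote element with .quote-text and a .quote-link (the actual bookmarklet may differ):

Code:
function extractQuote(item) {
    var quote = item.querySelector('.quote');
    if (!quote) return '';
    var textEl = quote.querySelector('.quote-text');
    var linkEl = quote.querySelector('a.quote-link');
    var text = textEl ? textEl.innerText.trim() : '';
    var url = '';
    if (linkEl) {
        var u = new URL(linkEl.getAttribute('href'), location.origin);
        u.hostname = 'twitter.com';
        url = u.href;
    }
    return '\n[Quoted tweet]\n' + text + (url ? '\n[URL]' + url + '[/URL]' : '');
}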

Update: fixed an issue where [URL] wasn't underlined.



example output:
Code:
https://twitter.com/Heraklines1/status/1834407208995467524#m
[SPOILER="thread continued"]
https://twitter.com/Heraklines1/status/1834411726768332907#m
https://twitter.com/Heraklines1/status/1834416519893192924#m
https://twitter.com/Heraklines1/status/1834569578891862186#m
[/SPOILER]
[SPOILER="full text"]


1/4
remember anthropic's claim that 2025-2026 the gap is going to be too large for any competitor to catch up?

that o1 rl data fly wheel is escape velocity

[Quoted tweet]
Many (including me) who believed in RL were waiting for a moment when it will start scaling in a general domain similarly to other successful paradigms. That moment finally has arrived and signifies a meaningful increase in our understanding  of training neural networks
[URL]https://twitter.com/MillionInt/status/1834402886245285960#m[/URL]

2/4
the singularitarians rn: rl scaling in general domain...? wait a sec...



3/4
another nice side effect of tighly rationing o1 is that high S/N on quality hard queries they will be receiving. ppl will largely only consult o1 for *important* tasks, easy triage

the story rly writes itself here guys

[Quoted tweet]
at only 30 requests - im going to think long and hard before i consult the oracle.
[URL]https://twitter.com/Orwelian84/status/1834302489715507555#m[/URL]

4/4
u cannot move a single step without extrapolating even a little, we draw lines regardless, including u. i am inclined to believe them here, shouldn't take too long to confirm that suspicion tho




[COLOR=rgb(184, 49, 47)][B][SIZE=5]To post tweets in this format, more info here: [URL]https://www.thecoli.com/threads/tips-and-tricks-for-posting-the-coli-megathread.984734/post-52211196[/URL][/SIZE][/B][/COLOR]
[/SPOILER]
[SPOILER="larger images"]
[img]https://pbs.twimg.com/media/GXUirofWoAAE-cN.png[/img]
[/SPOILER]
 

bnew

tweaked the output of the nitter bookmarklet so that it always replaces the tweet URL domains with twitter.com, no matter which nitter instance you're on, and shows images below their corresponding tweets. since it only collects tweets made by the main username, the @ isn't added in the main full text.


Summary

This JavaScript bookmarklet performs the following tasks:
  • Identifies Tweets: Collects all tweets on the current page.
  • Extracts Username: Identifies the username of the tweets' author.
  • Processes Tweets:
    • Extracts and cleans up tweet text.
    • Handles quoted tweets within each tweet.
    • Collects image URLs associated with each tweet.
  • Normalizes URLs: Ensures all URLs (including thread continuation URLs and the original URL) have their domains set to twitter.com (a sketch of this step follows after this list).
  • Formats Output: Combines processed tweet texts, images, and URLs into a structured format.
  • Copies to Clipboard: Copies the formatted output to the clipboard.
  • Displays Notification: Shows a notification indicating that data has been copied.
The final output includes:
  • The normalized original URL
  • A spoiler section for thread continuation URLs
  • A spoiler section for full text and large images of each tweet
This bookmarklet simplifies collecting and formatting tweets along with their associated images for easy sharing or posting elsewhere.
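
The normalization step on its own is small. A minimal sketch, assuming tweet URLs keep the same /user/status/id path on every nitter instance; toTwitterUrl is a hypothetical helper name and nitter.net below is just an example instance, not necessarily what the bookmarklet uses internally.

Code:
function toTwitterUrl(url) {
  // Swap whatever nitter instance served the page for twitter.com,
  // keeping the path, query string and #m fragment intact.
  try {
    const u = new URL(url);
    return 'https://twitter.com' + u.pathname + u.search + u.hash;
  } catch (e) {
    return url; // leave anything that isn't a valid URL untouched
  }
}

// Example:
// toTwitterUrl('https://nitter.net/LoveNIntegrity/status/1837912909415940421#m')
//   -> 'https://twitter.com/LoveNIntegrity/status/1837912909415940421#m'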




example output:
Code:
https://twitter.com/LoveNIntegrity/status/1837912909415940421#m
[SPOILER="thread continued"]
https://twitter.com/LoveNIntegrity/status/1837904328516161588#m
https://twitter.com/LoveNIntegrity/status/1837905193511309532#m
https://twitter.com/LoveNIntegrity/status/1837906206167285970#m
https://twitter.com/LoveNIntegrity/status/1837907001118019897#m
https://twitter.com/LoveNIntegrity/status/1837907915228733839#m
https://twitter.com/LoveNIntegrity/status/1837908721239736633#m
https://twitter.com/LoveNIntegrity/status/1837909627490447588#m
https://twitter.com/LoveNIntegrity/status/1837910385715716265#m
https://twitter.com/LoveNIntegrity/status/1837911287302414493#m
https://twitter.com/LoveNIntegrity/status/1837912099013484617#m
https://twitter.com/LoveNIntegrity/status/1837913731050733866#m
https://twitter.com/LoveNIntegrity/status/1837914453414707714#m
https://twitter.com/LoveNIntegrity/status/1837915234255745270#m
https://twitter.com/LoveNIntegrity/status/1837916108512182654#m
https://twitter.com/LoveNIntegrity/status/1837916900057403504#m
https://twitter.com/LoveNIntegrity/status/1837917632978129003#m
https://twitter.com/LoveNIntegrity/status/1837918692975263862#m
https://twitter.com/LoveNIntegrity/status/1837945434838651324#m
[/SPOILER]
[SPOILER="full text & large images"]


1/19
Screenshot #21 of Heritage Foundation's 2023-2032 Budget Blueprint.

[img]https://pbs.twimg.com/media/GYGMPuGX0AEOlJ6.jpg[/img]

2/19
Screenshot #22 of Heritage Foundation's 2023-2032 Budget Blueprint.

[img]https://pbs.twimg.com/media/GYGNCJdWMAA2qnV.jpg[/img]

3/19
Screenshot #23 of Heritage Foundation's 2023-2032 Budget Blueprint.

[img]https://pbs.twimg.com/media/GYGN9EOWwAEj8-S.jpg[/img]

4/19
Screenshot #24 of Heritage Foundation's 2023-2032 Budget Blueprint.

[img]https://pbs.twimg.com/media/GYGOrUIWYAAdwEu.jpg[/img]

5/19
Screenshot #25 of Heritage Foundation's 2023-2032 Budget Blueprint.

[img]https://pbs.twimg.com/media/GYGPgjrXUAA22G7.jpg[/img]

6/19
Screenshot #26 of Heritage Foundation's 2023-2032 Budget Blueprint.

[img]https://pbs.twimg.com/media/GYGQPdWWUAAY-RQ.jpg[/img]

7/19
Screenshot #27 of Heritage Foundation's 2023-2032 Budget Blueprint.

[img]https://pbs.twimg.com/media/GYGREO-WYAATJ71.jpg[/img]

8/19
Screenshot #28 of Heritage Foundation's 2023-2032 Budget Blueprint.

[img]https://pbs.twimg.com/media/GYGRwW8XgAAhVtD.jpg[/img]

9/19
Screenshot #29 of Heritage Foundation's 2023-2032 Budget Blueprint.

[img]https://pbs.twimg.com/media/GYGSk3AXYAEmtlA.jpg[/img]

10/19
Screenshot #30 of Heritage Foundation's 2023-2032 Budget Blueprint.

[img]https://pbs.twimg.com/media/GYGTUDmXoAAOEa8.jpg[/img]

11/19
Screenshot #31 of Heritage Foundation's 2023-2032 Budget Blueprint.

[img]https://pbs.twimg.com/media/GYGUDQuWIAAlNJx.jpg[/img]

12/19
Screenshot #32 of Heritage Foundation's 2023-2032 Budget Blueprint.

[img]https://pbs.twimg.com/media/GYGUzE8WsAAWa3Y.jpg[/img]

13/19
Screenshot #33 of Heritage Foundation's 2023-2032 Budget Blueprint.

[img]https://pbs.twimg.com/media/GYGVdGsXsAAqsaJ.jpg[/img]

14/19
Screenshot #34 of Heritage Foundation's 2023-2032 Budget Blueprint.

[img]https://pbs.twimg.com/media/GYGWKlOXEAA_pAz.jpg[/img]

15/19
Screenshot #35 of Heritage Foundation's 2023-2032 Budget Blueprint.

[img]https://pbs.twimg.com/media/GYGW9e0XYAEB-IS.jpg[/img]

16/19
Screenshot #36 of Heritage Foundation's 2023-2032 Budget Blueprint.

[img]https://pbs.twimg.com/media/GYGXriIWkAAt57t.jpg[/img]

17/19
Screenshot #37 of Heritage Foundation's 2023-2032 Budget Blueprint.

[img]https://pbs.twimg.com/media/GYGYWMrXUAAuFrO.jpg[/img]

18/19
Screenshot #38 (last) of Heritage Foundation's 2023-2032 Budget Blueprint. I know I've missed items. Look at their plans yourself at [U][URL]https://www.heritage.org/budget/pages/policy-proposals.html[/URL][/U].

[img]https://pbs.twimg.com/media/GYGZT4cW4AA90-F.jpg[/img]

19/19
@threadreaderapp unroll




[COLOR=rgb(184, 49, 47)][B][SIZE=5]To post tweets in this format, more info here: [URL]https://www.thecoli.com/threads/tips-and-tricks-for-posting-the-coli-megathread.984734/post-52211196[/URL][/SIZE][/B][/COLOR]
[/SPOILER]
 