
e2b dev

Bing Rank
Average position in Bing search rankings for related queries such as 'Sales AI Agent', 'Coding AI Agent', etc.
Google Rank
Average position in Google search rankings for related queries such as 'Sales AI Agent', 'Coding AI Agent', etc.
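Both rank metrics are a plain arithmetic mean: the listing's position is recorded for each tracked query and the positions are averaged. A minimal illustration; the query list and numbers below are made up for the example, not real Bing or Google ranking data:

~~~python
# Illustrative only: the positions below are invented, not actual ranking data.
positions = {
    "Sales AI Agent": 12,
    "Coding AI Agent": 7,
    "AI Code Interpreter": 4,
}

average_position = sum(positions.values()) / len(positions)
print(f"Average position: {average_position:.1f}")  # 7.7
~~~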

Last Updated: 2025-04-15

Information

JavaScript

~~~js
// npm install @e2b/code-interpreter
import { Sandbox } from '@e2b/code-interpreter'

// Create an E2B Code Interpreter with a JavaScript kernel
const sandbox = await Sandbox.create()

// Execute JavaScript cells
await sandbox.runCode('x = 1')
const execution = await sandbox.runCode('x+=1; x')

// Outputs 2
console.log(execution.text)
~~~

Python

~~~python
# pip install e2b-code-interpreter
from e2b_code_interpreter import Sandbox

# Create an E2B Sandbox
with Sandbox() as sandbox:
    # Run code
    sandbox.run_code("x = 1")
    execution = sandbox.run_code("x+=1; x")
    print(execution.text)  # outputs 2
~~~

Vercel AI SDK

~~~js
// npm install ai @ai-sdk/openai zod @e2b/code-interpreter
import { openai } from '@ai-sdk/openai'
import { generateText } from 'ai'
import z from 'zod'
import { Sandbox } from '@e2b/code-interpreter'

// Create OpenAI client
const model = openai('gpt-4o')

const prompt = "Calculate how many r's are in the word 'strawberry'"

// Generate text with OpenAI
const { text } = await generateText({
  model,
  prompt,
  tools: {
    // Define a tool that runs code in a sandbox
    codeInterpreter: {
      description: 'Execute python code in a Jupyter notebook cell and return result',
      parameters: z.object({
        code: z.string().describe('The python code to execute in a single cell'),
      }),
      execute: async ({ code }) => {
        // Create a sandbox, execute LLM-generated code, and return the result
        const sandbox = await Sandbox.create()
        const { text, results, logs, error } = await sandbox.runCode(code)
        return results
      },
    },
  },
  // This is required to feed the tool call result back to the LLM
  maxSteps: 2,
})

console.log(text)
~~~

OpenAI

~~~python
# pip install openai e2b-code-interpreter
from openai import OpenAI
from e2b_code_interpreter import Sandbox

# Create OpenAI client
client = OpenAI()

system = "You are a helpful assistant that can execute python code in a Jupyter notebook. Only respond with the code to be executed and nothing else. Strip backticks in code blocks."
prompt = "Calculate how many r's are in the word 'strawberry'"

# Send messages to OpenAI API
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[
        {"role": "system", "content": system},
        {"role": "user", "content": prompt},
    ],
)

# Extract the code from the response
code = response.choices[0].message.content

# Execute code in E2B Sandbox
if code:
    with Sandbox() as sandbox:
        execution = sandbox.run_code(code)
        result = execution.text
    print(result)
~~~
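The OpenAI example above and the provider examples that follow ask the model to return bare Python and to strip backticks itself, but in practice the model may still wrap its answer in a Markdown fence. A small defensive helper can clean the response before it is passed to `run_code`. The sketch below is illustrative only; `strip_code_fences` and its regex are assumptions, not part of the E2B SDK:

~~~python
import re

def strip_code_fences(text: str) -> str:
    """Remove a surrounding Markdown code fence (```python ... ```) if the model added one.

    Hypothetical helper for illustration; not part of the E2B SDK.
    """
    match = re.search(r"```(?:python)?\s*(.*?)```", text, re.DOTALL)
    return match.group(1).strip() if match else text.strip()

# Usage (assuming `code` holds the raw LLM response):
# code = strip_code_fences(code)
~~~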
Anthropic

~~~python
# pip install anthropic e2b-code-interpreter
from anthropic import Anthropic
from e2b_code_interpreter import Sandbox

# Create Anthropic client
client = Anthropic()

system_prompt = "You are a helpful assistant that can execute python code in a Jupyter notebook. Only respond with the code to be executed and nothing else. Strip backticks in code blocks."
prompt = "Calculate how many r's are in the word 'strawberry'"

# Send messages to Anthropic API
response = client.messages.create(
    model="claude-3-5-sonnet-20240620",
    max_tokens=1024,
    system=system_prompt,
    messages=[
        {"role": "user", "content": prompt},
    ],
)

# Extract code from response
code = response.content[0].text

# Execute code in E2B Sandbox
with Sandbox() as sandbox:
    execution = sandbox.run_code(code)
    result = execution.text
print(result)
~~~

Mistral

~~~python
# pip install mistralai e2b-code-interpreter
import os
from mistralai import Mistral
from e2b_code_interpreter import Sandbox

api_key = os.environ["MISTRAL_API_KEY"]

# Create Mistral client
client = Mistral(api_key=api_key)

system_prompt = "You are a helpful assistant that can execute python code in a Jupyter notebook. Only respond with the code to be executed and nothing else. Strip backticks in code blocks."
prompt = "Calculate how many r's are in the word 'strawberry'"

# Send the prompt to the model
response = client.chat.complete(
    model="codestral-latest",
    messages=[
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": prompt},
    ],
)

# Extract the code from the response
code = response.choices[0].message.content

# Execute code in E2B Sandbox
with Sandbox() as sandbox:
    execution = sandbox.run_code(code)
    result = execution.text
print(result)
~~~

Ollama

~~~python
# pip install ollama e2b-code-interpreter
import ollama
from e2b_code_interpreter import Sandbox

# Send the prompt to the model
response = ollama.chat(
    model="llama3.2",
    messages=[
        {
            "role": "system",
            "content": "You are a helpful assistant that can execute python code in a Jupyter notebook. Only respond with the code to be executed and nothing else. Strip backticks in code blocks.",
        },
        {
            "role": "user",
            "content": "Calculate how many r's are in the word 'strawberry'",
        },
    ],
)

# Extract the code from the response
code = response["message"]["content"]

# Execute code in E2B Sandbox
with Sandbox() as sandbox:
    execution = sandbox.run_code(code)
    result = execution.text
print(result)
~~~

LangChain

~~~python
# pip install langchain langchain-openai e2b-code-interpreter
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from e2b_code_interpreter import Sandbox

system_prompt = "You are a helpful assistant that can execute python code in a Jupyter notebook. Only respond with the code to be executed and nothing else. Strip backticks in code blocks."
prompt = "Calculate how many r's are in the word 'strawberry'"

# Create LangChain components
llm = ChatOpenAI(model="gpt-4o")
prompt_template = ChatPromptTemplate.from_messages([
    ("system", system_prompt),
    ("human", "{input}"),
])

# Create the chain
chain = prompt_template | llm | StrOutputParser()

# Run the chain
code = chain.invoke({"input": prompt})

# Execute code in E2B Sandbox
with Sandbox() as sandbox:
    execution = sandbox.run_code(code)
    result = execution.text
print(result)
~~~
prompt = "Calculate how many r's are in the word 'strawberry'" # Create LangChain components llm = ChatOpenAI(model= "gpt-4o" ) ( "system" , system_prompt), ( "human" , "{input}" ) # Create the chain # Run the chain code = chain.invoke({ "input" : prompt}) # Execute code in E2B Sandbox with Sandbox() as sandbox: print (result) ~ ~~~~~~~~~~~ from llama_index.core.tools import FunctionTool from llama_index.llms.openai import OpenAI from llama_index.core.agent import ReActAgent from e2b_code_interpreter import Sandbox # Define the tool def execute_python ( code: str ): with Sandbox() as sandbox: return execution.text name= "execute_python" , description= "Execute python code in a Jupyter notebook cell and return result" , # Initialize LLM llm = OpenAI(model= "gpt-4o" ) # Initialize ReAct agent agent = ReActAgent.from_tools([e2b_interpreter_tool], llm=llm, verbose= True ) agent.chat( "Calculate how many r's are in the word 'strawberry'" ) All JS Python Next.js LangChain LangGraph Meta OpenAI Anthropic Mistral Fireworks AI Together AI great product one hour revolutionized It just works. gain enterprises’ trust

Prompts

Reviews

Tags
