We use Amazon Aurora PostgreSQL as our Tulip backend database and have been very pleased with it. It is super easy to scale, create read replicas, etc. (but don't forget to run ANALYZE after a major version upgrade), and I really like calling Lambdas from Aurora Postgres.
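Invoking a Lambda from SQL goes through the aws_lambda extension. Here is a minimal sketch, assuming the extension is installed and the cluster's IAM role is allowed to invoke the function (the function name, region, and payload are made up):

-- Synchronously invoke a Lambda function from Aurora PostgreSQL
SELECT *
FROM aws_lambda.invoke(
    aws_commons.create_lambda_function_arn('my-function', 'us-east-1'),
    '{"body": "Hello from Aurora"}'::json
);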
Recently I was introduced to a very cool and powerful way to run Postgres: Supabase.
And this thing is powerful! I wanted to write images to Supabase's CDN-backed storage, and I wanted to do it using Supabase Edge Functions.
Similar to this tutorial, I wrote a Postgres function that uses extensions.http_request:
CREATE OR REPLACE FUNCTION public.store_image_to_supabase(s3_url text)
RETURNS text
LANGUAGE plpgsql
AS $function$
DECLARE
    response text;
    payload jsonb;
    _generated_url text;
BEGIN
    -- Mimic the { "record": { ... } } shape that Supabase webhooks send
    payload = jsonb_build_object(
        'record', jsonb_build_object('s3_url', s3_url)
    );

    -- Call the edge function synchronously via the pg http extension.
    -- Edge functions expect the anon key as a bearer token in the
    -- Authorization header; the payload must be cast to text here.
    SELECT content INTO response
    FROM extensions.http((
        'POST',
        'https://abcdefg.functions.supabase.co/s3-to-supabase',
        ARRAY[extensions.http_header('Authorization', 'Bearer mysecret')],
        'application/json',
        payload::text
    )::extensions.http_request);

    -- Extract the public URL from the JSON response; ->> returns
    -- unquoted text, so there are no quotes to strip afterwards
    _generated_url := response::jsonb ->> 'fullPath';

    RETURN _generated_url;
END
$function$;
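Calling it from SQL then looks like this (the bucket URL below is just a made-up example):

-- Copy an object from S3 into Supabase storage and get back its public URL
SELECT public.store_image_to_supabase('https://my-bucket.s3.amazonaws.com/cat.jpg');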
This then calls an edge function that looks something like this:
import { serve } from "https://deno.land/std@0.168.0/http/server.ts";
import { createClient } from "https://esm.sh/@supabase/supabase-js@2.36.0";
import { extname } from "https://deno.land/std@0.168.0/path/mod.ts";

function getMimeTypeFromExtension(extension: string): string {
  // A simple mapping of some common extensions to MIME types;
  // extend this list as per your needs
  const mimeTypes: Record<string, string> = {
    ".jpg": "image/jpeg",
    ".jpeg": "image/jpeg",
    ".png": "image/png",
    ".gif": "image/gif",
    ".bmp": "image/bmp",
  };
  return mimeTypes[extension] || "application/octet-stream";
}

serve(async (req) => {
  const payload = await req.json();
  console.log(payload?.record?.s3_url);
  if (!payload?.record?.s3_url) {
    console.error("s3_url is missing in the payload");
    return new Response("s3_url missing", { status: 400 });
  }

  // Download the source object from S3
  const response = await fetch(payload.record.s3_url);
  if (!response.ok) {
    console.error("Failed to fetch the file:", response.statusText);
    return new Response("Failed to fetch the file", { status: 500 });
  }
  const fileBuffer = new Uint8Array(await response.arrayBuffer());

  // Try to get the filename from the Content-Disposition header
  const contentDisposition = response.headers.get("Content-Disposition");
  let filename = contentDisposition
    ? contentDisposition.split("filename=")[1]?.replace(/["]/g, "")
    : null;
  if (!filename) {
    // If the filename is not in the headers, take it from the URL or
    // generate a dynamic name (like a timestamp)
    filename = payload.record.s3_url.split("/").pop() || `image-${Date.now()}`;
  }

  // Get the content type from the headers, falling back to the extension
  const contentType = response.headers.get("Content-Type") ||
    getMimeTypeFromExtension(extname(filename));

  // SUPABASE_URL and SUPABASE_ANON_KEY are injected into the edge runtime
  const supabaseClient = createClient(
    Deno.env.get("SUPABASE_URL")!,
    Deno.env.get("SUPABASE_ANON_KEY")!,
  );
  const { data: upload, error: uploadError } = await supabaseClient.storage
    .from("images")
    .upload(filename, fileBuffer, {
      contentType: contentType,
      cacheControl: "3600",
      upsert: false,
    });
  if (uploadError || !upload) {
    console.error(uploadError);
    return new Response("Failed to upload the file", { status: 500 });
  }

  // Prefix the storage path with the project's public storage URL
  const fullPath = `https://abcdefg.supabase.co/storage/v1/object/public/images/${upload.path}`;

  // Construct the response object
  const responseObject = { fullPath: fullPath };

  // Return the response object as JSON
  return new Response(JSON.stringify(responseObject), {
    status: 200,
    headers: { "Content-Type": "application/json" },
  });
});
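A couple of notes: the project ref abcdefg and the mysecret key are placeholders, so substitute your own project URL and anon key. By default, edge functions verify the JWT in the Authorization header, which is why the Postgres function sends the anon key as a bearer token; if you deploy with supabase functions deploy s3-to-supabase --no-verify-jwt, you can skip that check entirely. And instead of hardcoding the public URL prefix, you could likely use supabaseClient.storage.from('images').getPublicUrl(upload.path), which builds the same public URL for you.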