WARNING: THIS SITE IS A MIRROR OF GITHUB.COM / IT CANNOT LOG IN OR REGISTER ACCOUNTS / THE CONTENTS ARE PROVIDED AS-IS / THIS SITE ASSUMES NO RESPONSIBILITY FOR ANY DISPLAYED CONTENT OR LINKS / IF YOU FIND SOMETHING THAT MAY NOT BE SUITABLE FOR EVERYONE, CONTACT THE ADMIN AT ilovescratch@foxmail.com
Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
33 commits
Select commit Hold shift + click to select a range
8b976e4
bruh
Brendonovich Oct 7, 2025
b12b221
workflow cluster stuff
Brendonovich Oct 7, 2025
fe273ee
fix vercel deploy
Brendonovich Oct 8, 2025
dc2f22b
attempt @vercel/otel integration
Brendonovich Oct 8, 2025
84c21bb
vercel otel idk
Brendonovich Oct 8, 2025
921549e
aurora db
Oct 9, 2025
8c8f599
separate shard db
Oct 9, 2025
1bcee3a
more stuff
Brendonovich Oct 9, 2025
fe3180e
make aws credentials optional
Brendonovich Oct 10, 2025
40ee84f
cleanup
Brendonovich Oct 10, 2025
b4361e4
types
Brendonovich Oct 10, 2025
2a91e42
types
Brendonovich Oct 10, 2025
b489244
update infra
Brendonovich Oct 13, 2025
96a6aa0
workflow secret
Brendonovich Oct 13, 2025
bf1d661
Merge branch 'main' into staging
Brendonovich Oct 13, 2025
9435124
remove aws keys from ecs
Oct 13, 2025
039b88b
update deps
Brendonovich Oct 13, 2025
9e02df8
reorganise error handling a bit
Brendonovich Oct 13, 2025
c512587
fixup envs
Brendonovich Oct 13, 2025
1cdc863
fix handleDomainError
Brendonovich Oct 13, 2025
71f9f74
log if no rpc auth header
Brendonovich Oct 13, 2025
3052627
remove some NEXT_PUBLIC envs + add s3 buffer
Brendonovich Oct 13, 2025
0c9d8db
fix ECS credentials
Brendonovich Oct 13, 2025
7ebeb56
formatting
Brendonovich Oct 13, 2025
4edf301
restrict loom import to team
Brendonovich Oct 14, 2025
a6024a5
Merge branch 'main' into staging
Brendonovich Oct 14, 2025
21f5b0f
remove unused env
Brendonovich Oct 14, 2025
3e0132a
add AwsCredentials service
Brendonovich Oct 14, 2025
311c839
restore workflow cluster to sst config
Brendonovich Oct 14, 2025
cb09c4a
Merge branch 'main' into staging
Brendonovich Oct 14, 2025
148f18d
add WEB_URL back
Brendonovich Oct 14, 2025
5dace4a
reuse oidc in staging
Oct 14, 2025
c698566
formatting
Brendonovich Oct 14, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 4 additions & 5 deletions apps/web/actions/video/upload.ts
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ import { nanoId } from "@cap/database/helpers";
import { s3Buckets, videos, videoUploads } from "@cap/database/schema";
import { buildEnv, NODE_ENV, serverEnv } from "@cap/env";
import { dub, userIsPro } from "@cap/utils";
import { S3Buckets } from "@cap/web-backend";
import { AwsCredentials, S3Buckets } from "@cap/web-backend";
import { type Folder, type Organisation, Video } from "@cap/web-domain";
import { eq } from "drizzle-orm";
import { Effect, Option } from "effect";
Expand Down Expand Up @@ -60,10 +60,9 @@ async function getVideoUploadPresignedUrl({
if (distributionId) {
const cloudfront = new CloudFrontClient({
region: serverEnv().CAP_AWS_REGION || "us-east-1",
credentials: {
accessKeyId: serverEnv().CAP_AWS_ACCESS_KEY || "",
secretAccessKey: serverEnv().CAP_AWS_SECRET_KEY || "",
},
credentials: await runPromise(
Effect.map(AwsCredentials, (c) => c.credentials),
),
});

const pathToInvalidate = "/" + fileKey;
Expand Down
9 changes: 4 additions & 5 deletions apps/web/app/api/upload/[...route]/signed.ts
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ import type { PresignedPost } from "@aws-sdk/s3-presigned-post";
import { db, updateIfDefined } from "@cap/database";
import { s3Buckets, videos } from "@cap/database/schema";
import { serverEnv } from "@cap/env";
import { S3Buckets } from "@cap/web-backend";
import { AwsCredentials, S3Buckets } from "@cap/web-backend";
import { Video } from "@cap/web-domain";
import { zValidator } from "@hono/zod-validator";
import { and, eq } from "drizzle-orm";
Expand Down Expand Up @@ -73,10 +73,9 @@ app.post(

const cloudfront = new CloudFrontClient({
region: serverEnv().CAP_AWS_REGION || "us-east-1",
credentials: {
accessKeyId: serverEnv().CAP_AWS_ACCESS_KEY || "",
secretAccessKey: serverEnv().CAP_AWS_SECRET_KEY || "",
},
credentials: await runPromise(
Effect.map(AwsCredentials, (c) => c.credentials),
),
});

const pathToInvalidate = "/" + fileKey;
Expand Down
7 changes: 4 additions & 3 deletions apps/web/app/api/webhooks/stripe/route.ts
Original file line number Diff line number Diff line change
Expand Up @@ -115,9 +115,10 @@ export const POST = async (req: Request) => {
console.log("Webhook received");
const buf = await req.text();
const sig = req.headers.get("Stripe-Signature") as string;
const webhookSecret = serverEnv().VERCEL_ENV === "production"
? serverEnv().STRIPE_WEBHOOK_SECRET_LIVE
: serverEnv().STRIPE_WEBHOOK_SECRET_TEST;
const webhookSecret =
serverEnv().VERCEL_ENV === "production"
? serverEnv().STRIPE_WEBHOOK_SECRET_LIVE
: serverEnv().STRIPE_WEBHOOK_SECRET_TEST;
let event: Stripe.Event;

try {
Expand Down
2 changes: 2 additions & 0 deletions apps/web/lib/server.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ import "server-only";

import { decrypt } from "@cap/database/crypto";
import {
AwsCredentials,
Database,
Folders,
HttpAuthMiddlewareLive,
Expand Down Expand Up @@ -104,6 +105,7 @@ export const Dependencies = Layer.mergeAll(
SpacesPolicy.Default,
OrganisationsPolicy.Default,
Spaces.Default,
AwsCredentials.Default,
WorkflowRpcLive,
layerTracer,
).pipe(
Expand Down
40 changes: 39 additions & 1 deletion infra/sst-env.d.ts
Original file line number Diff line number Diff line change
Expand Up @@ -5,10 +5,48 @@

declare module "sst" {
export interface Resource {
DATABASE_URL: {
AuroraDB: {
clusterArn: string;
database: string;
host: string;
password: string;
port: number;
reader: string;
secretArn: string;
type: "sst.aws.Aurora";
username: string;
};
CAP_AWS_ACCESS_KEY: {
type: "sst.sst.Secret";
value: string;
};
CAP_AWS_SECRET_KEY: {
type: "sst.sst.Secret";
value: string;
};
DATABASE_URL_MYSQL: {
type: "sst.sst.Secret";
value: string;
};
GITHUB_PAT: {
type: "sst.sst.Secret";
value: string;
};
MyApi: {
type: "sst.aws.ApiGatewayV2";
url: string;
};
Runner: {
service: string;
type: "sst.aws.Service";
};
ShardManager: {
service: string;
type: "sst.aws.Service";
};
Vpc: {
type: "sst.aws.Vpc";
};
}
}
/// <reference path="sst-env.d.ts" />
Expand Down
83 changes: 43 additions & 40 deletions infra/sst.config.ts
Original file line number Diff line number Diff line change
Expand Up @@ -45,13 +45,16 @@ export default $config({
const secrets = Secrets();
// const planetscale = Planetscale();

const recordingsBucket = new aws.s3.BucketV2("RecordingsBucket");
const recordingsBucket = new aws.s3.BucketV2(
"RecordingsBucket",
{},
{ retainOnDelete: true },
);
Comment on lines +48 to +52
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

S3 bucket lacks security hardening (public access block, SSE, ownership controls).

Harden defaults for the recordings bucket.

Keep the bucket instantiation, and add these resources below it:

new aws.s3.BucketPublicAccessBlock("RecordingsBucketPublicAccess", {
  bucket: recordingsBucket.id,
  blockPublicAcls: true,
  ignorePublicAcls: true,
  blockPublicPolicy: true,
  restrictPublicBuckets: true,
});

new aws.s3.BucketOwnershipControls("RecordingsBucketOwnership", {
  bucket: recordingsBucket.id,
  rule: { objectOwnership: "BucketOwnerEnforced" },
});

new aws.s3.BucketServerSideEncryptionConfigurationV2("RecordingsBucketSSE", {
  bucket: recordingsBucket.id,
  rules: [
    {
      applyServerSideEncryptionByDefault: { sseAlgorithm: "AES256" },
      bucketKeyEnabled: true,
    },
  ],
});

// Optional but recommended:
new aws.s3.BucketVersioningV2("RecordingsBucketVersioning", {
  bucket: recordingsBucket.id,
  versioningConfiguration: { status: "Enabled" },
});
🤖 Prompt for AI Agents
In infra/sst.config.ts around lines 48 to 52, the RecordingsBucket is created
without S3 security hardening; add resources immediately after the bucket to
enforce Block Public Access (blockPublicAcls, ignorePublicAcls,
blockPublicPolicy, restrictPublicBuckets), set Bucket Ownership Controls to
BucketOwnerEnforced, configure Server-Side Encryption (AES256 with bucket key
enabled), and optionally enable Versioning; reference recordingsBucket.id for
each new aws.s3 resource so the bucket gets public access blocked, ownership
enforced, SSE configured, and versioning enabled.


const vercelVariables = [
{ key: "NEXT_PUBLIC_AXIOM_TOKEN", value: AXIOM_API_TOKEN },
{ key: "NEXT_PUBLIC_AXIOM_DATASET", value: AXIOM_DATASET },
{ key: "CAP_AWS_BUCKET", value: recordingsBucket.bucket },
{ key: "NEXT_PUBLIC_CAP_AWS_BUCKET", value: recordingsBucket.bucket },
{ key: "DATABASE_URL", value: secrets.DATABASE_URL_MYSQL.value },
];
Comment on lines 54 to 59
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🛠️ Refactor suggestion | 🟠 Major

Use secret-backed AXIOM token for Vercel env like other sensitive values.

Replace the direct constant usage with the Secret value.

-  { key: "NEXT_PUBLIC_AXIOM_TOKEN", value: AXIOM_API_TOKEN },
+  { key: "NEXT_PUBLIC_AXIOM_TOKEN", value: secrets.AXIOM_API_TOKEN.value },

Note: Although labeled “NEXT_PUBLIC”, avoid committing the token in code; source-of-truth via Secret prevents accidental reuse and rotation issues.

📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
const vercelVariables = [
{ key: "NEXT_PUBLIC_AXIOM_TOKEN", value: AXIOM_API_TOKEN },
{ key: "NEXT_PUBLIC_AXIOM_DATASET", value: AXIOM_DATASET },
{ key: "CAP_AWS_BUCKET", value: recordingsBucket.bucket },
{ key: "NEXT_PUBLIC_CAP_AWS_BUCKET", value: recordingsBucket.bucket },
{ key: "DATABASE_URL", value: secrets.DATABASE_URL_MYSQL.value },
];
const vercelVariables = [
{ key: "NEXT_PUBLIC_AXIOM_TOKEN", value: secrets.AXIOM_API_TOKEN.value },
{ key: "NEXT_PUBLIC_AXIOM_DATASET", value: AXIOM_DATASET },
{ key: "CAP_AWS_BUCKET", value: recordingsBucket.bucket },
{ key: "DATABASE_URL", value: secrets.DATABASE_URL_MYSQL.value },
];
🤖 Prompt for AI Agents
In infra/sst.config.ts around lines 54 to 59, the Vercel env currently uses the
plain AXIOM_API_TOKEN constant; replace that with the secret-backed value (e.g.,
secrets.AXIOM_API_TOKEN.value) so the token is sourced from the Secrets object
like DATABASE_URL_MYSQL.value, preventing accidental commit and enabling
rotation — keep the key name as "NEXT_PUBLIC_AXIOM_TOKEN" but use the secret
value reference instead of the constant.


Expand All @@ -60,21 +63,21 @@ export default $config({
// status: "Enabled",
// });

// const cloudfrontDistribution = aws.cloudfront.getDistributionOutput({
// id: "E36XSZEM0VIIYB",
// });
const cloudfrontDistribution =
$app.stage === "production"
? aws.cloudfront.getDistributionOutput({ id: "E36XSZEM0VIIYB" })
: null;

const vercelUser = new aws.iam.User("VercelUser", { forceDestroy: false });

const vercelProject = vercel.getProjectOutput({ name: "cap-web" });

if (webUrl) {
if (webUrl)
vercelVariables.push(
{ key: "WEB_URL", value: webUrl },
{ key: "NEXT_PUBLIC_WEB_URL", value: webUrl },
{ key: "NEXTAUTH_URL", value: webUrl },
);
}

// vercelEnvVar("VercelCloudfrontEnv", {
// key: "CAP_CLOUDFRONT_DISTRIBUTION_ID",
Expand All @@ -89,16 +92,14 @@ export default $config({
return {
aud,
url,
provider: await aws.iam
.getOpenIdConnectProvider({ url: `https://${url}` })
.catch(
() =>
new aws.iam.OpenIdConnectProvider(
provider:
$app.stage === "production" || $app.stage === "staging"
? aws.iam.getOpenIdConnectProviderOutput({ url: `https://${url}` })
: new aws.iam.OpenIdConnectProvider(
"VercelAWSOIDC",
{ url: `https://${url}`, clientIdLists: [aud] },
{ retainOnDelete: true },
),
),
};
})();

Expand All @@ -118,7 +119,7 @@ export default $config({
},
StringLike: {
[`${oidc.url}:sub`]: [
`owner:${VERCEL_TEAM_SLUG}:project:*:environment:staging`,
`owner:${VERCEL_TEAM_SLUG}:project:*:environment:${$app.stage}`,
],
Comment on lines 120 to 123
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🔴 Critical

🧩 Analysis chain

Narrow OIDC sub condition to least privilege (avoid project:*).

Scope sub to specific project(s) to reduce blast radius.

If the project name is cap-web, prefer:

- [`${oidc.url}:sub`]: [`owner:${VERCEL_TEAM_SLUG}:project:*:environment:${$app.stage}`]
+ [`${oidc.url}:sub`]: [`owner:${VERCEL_TEAM_SLUG}:project:cap-web:environment:${$app.stage}`]

Verify the exact sub format per Vercel OIDC docs for role assumptions.


🌐 Web query:

What is the exact `sub` (subject) format emitted by Vercel OIDC for deployments? How to scope IAM role trust to a single project and environment?

💡 Result:

Exact sub format

  • sub = "owner:[TEAM_SLUG]:project:[PROJECT_NAME]:environment:[ENVIRONMENT]". Example: "owner:acme:project:acme_website:environment:production". [1]

How to scope an IAM role trust to a single Vercel project + environment

{
"Version":"2012-10-17",
"Statement":[
{
"Effect":"Allow",
"Principal":{
"Federated":"arn:aws:iam::[ACCOUNT_ID]:oidc-provider/oidc.vercel.com/[TEAM_SLUG]"
},
"Action":"sts:AssumeRoleWithWebIdentity",
"Condition":{
"StringEquals":{
"oidc.vercel.com/[TEAM_SLUG]:aud":"https://vercel.com/[TEAM_SLUG]",
"oidc.vercel.com/[TEAM_SLUG]:sub":"owner:[TEAM_SLUG]:project:[PROJECT_NAME]:environment:[ENVIRONMENT]"
}
}
}
]
}

  • Notes: use StringLike with wildcards if you want to allow multiple projects/environments; for team→global issuer differences see Vercel docs. [1][2]

Sources

  • Vercel OIDC reference and AWS guide. [1][2]

Narrow OIDC sub to Specific Project
Replace the wildcard with the exact project name to enforce least privilege. For example, for cap-web:

- [`${oidc.url}:sub`]: [`owner:${VERCEL_TEAM_SLUG}:project:*:environment:${$app.stage}`]
+ [`${oidc.url}:sub`]: [`owner:${VERCEL_TEAM_SLUG}:project:cap-web:environment:${$app.stage}`]
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
StringLike: {
[`${oidc.url}:sub`]: [
`owner:${VERCEL_TEAM_SLUG}:project:*:environment:staging`,
`owner:${VERCEL_TEAM_SLUG}:project:*:environment:${$app.stage}`,
],
StringLike: {
[`${oidc.url}:sub`]: [
`owner:${VERCEL_TEAM_SLUG}:project:cap-web:environment:${$app.stage}`,
],
🤖 Prompt for AI Agents
In infra/sst.config.ts around lines 120 to 123, the OIDC StringLike claim
currently uses a wildcard for the Vercel project which is overly broad; replace
the `project:*` segment with the exact project slug (e.g., `project:cap-web`) or
with a specific env var like `project:${VERCEL_PROJECT_NAME}` so the claim
becomes `owner:${VERCEL_TEAM_SLUG}:project:cap-web:environment:${$app.stage}`
(or equivalent using the env var), and verify the project slug matches the
actual Vercel project name.

},
},
Expand All @@ -128,40 +129,57 @@ export default $config({
inlinePolicies: [
{
name: "VercelAWSAccessPolicy",
policy: recordingsBucket.arn.apply((arn) =>
policy: $resolve([
recordingsBucket.arn,
cloudfrontDistribution?.arn,
] as const).apply(([bucketArn, cloudfrontArn]) =>
JSON.stringify({
Version: "2012-10-17",
Statement: [
{
Effect: "Allow",
Action: ["s3:*"],
Resource: `${arn}/*`,
Resource: `${bucketArn}/*`,
},
{
Effect: "Allow",
Action: ["s3:*"],
Resource: `${arn}`,
Resource: bucketArn,
},
],
cloudfrontArn && {
Effect: "Allow",
Action: ["cloudfront:CreateInvalidation"],
Resource: cloudfrontArn,
},
].filter(Boolean),
}),
),
},
Comment on lines +132 to 157
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🛠️ Refactor suggestion | 🟠 Major

🧩 Analysis chain

IAM policy hardening: principle of least privilege and CloudFront resource scoping.

  • S3 permissions are s3:* on bucket and objects. Limit to required actions (e.g., GetObject, PutObject, DeleteObject, ListBucket) to reduce risk.
  • Confirm whether cloudfront:CreateInvalidation supports resource-level ARNs. If not, set Resource: "*" (typical for CloudFront invalidations).

Apply this diff to scope actions; adjust CloudFront after verification:

-          Action: ["s3:*"],
+          Action: ["s3:ListBucket"],
           Resource: ${bucketArn},
         },
         {
           Effect: "Allow",
-          Action: ["s3:*"],
+          Action: ["s3:GetObject","s3:PutObject","s3:DeleteObject"],
           Resource: `${bucketArn}/*`,
         },
-        cloudfrontArn && {
-          Effect: "Allow",
-          Action: ["cloudfront:CreateInvalidation"],
-          Resource: cloudfrontArn,
-        },
+        // After verification, either keep ARN if supported or use "*"
+        cloudfrontArn && {
+          Effect: "Allow",
+          Action: ["cloudfront:CreateInvalidation"],
+          Resource: cloudfrontArn, // replace with "*" if ARNs aren't supported
+        },

Also, ensure $resolve([recordingsBucket.arn, cloudfrontDistribution?.arn]) tolerates undefined. If not, wrap with an explicit nullable Output or branch before $resolve.


🌐 Web query:

Does AWS IAM support resource-level permissions for `cloudfront:CreateInvalidation`, or must `Resource` be `"*"`?

💡 Result:

Yes — cloudfront:CreateInvalidation supports resource-level permissions. The Service Authorization Reference shows CreateInvalidation requires the distribution resource (arn:...:distribution/...) so you can restrict Resource to that distribution ARN instead of "*". [1][2]

Sources:

  • AWS Service Authorization Reference — CloudFront (CreateInvalidation → resource type: distribution*). [1]
  • CloudFront IAM / Developer Guide (how resource-level permissions work for CloudFront). [2]

Scope S3 actions and narrow CloudFront invalidation resource

  • Restrict S3 to s3:ListBucket on ${bucketArn} and s3:GetObject,s3:PutObject,s3:DeleteObject on ${bucketArn}/*.
  • CloudFront invalidation supports distribution ARNs—keep Resource: cloudfrontArn.
  • Ensure $resolve([recordingsBucket.arn, cloudfrontDistribution?.arn]) handles undefined.
         {
           Effect: "Allow",
-          Action: ["s3:*"],
+          Action: ["s3:ListBucket"],
           Resource: ${bucketArn},
         },
         {
           Effect: "Allow",
-          Action: ["s3:*"],
+          Action: ["s3:GetObject", "s3:PutObject", "s3:DeleteObject"],
           Resource: `${bucketArn}/*`,
         },
-        cloudfrontArn && {
-          Effect: "Allow",
-          Action: ["cloudfront:CreateInvalidation"],
-          Resource: cloudfrontArn,
-        },
+        cloudfrontArn && {
+          Effect: "Allow",
+          Action: ["cloudfront:CreateInvalidation"],
+          Resource: cloudfrontArn,
+        },

Committable suggestion skipped: line range outside the PR's diff.

🤖 Prompt for AI Agents
In infra/sst.config.ts around lines 132 to 157, the IAM policy is overly
permissive and may break when cloudfrontDistribution?.arn is undefined; update
the policy to (1) replace the broad "s3:*" permissions with two statements: one
granting "s3:ListBucket" on the bucket ARN (Resource: bucketArn) and another
granting "s3:GetObject","s3:PutObject","s3:DeleteObject" on the objects path
(Resource: `${bucketArn}/*`), (2) keep the CloudFront invalidation statement as
Effect: "Allow", Action: ["cloudfront:CreateInvalidation"], Resource:
cloudfrontArn (do not convert to wildcard), and (3) ensure the $resolve([...])
call and its apply callback handle undefined cloudfrontArn safely (e.g., include
cloudfrontArn in the array and only push the CloudFront statement when
cloudfrontArn is truthy) so the policy JSON never contains undefined.

],
});

const workflowCluster = await WorkflowCluster(recordingsBucket, secrets);
const workflowCluster =
$app.stage === "staging"
? await WorkflowCluster(recordingsBucket, secrets)
: null;

if ($app.stage === "staging" || $app.stage === "production") {
[
...vercelVariables,
{ key: "WORKFLOWS_RPC_URL", value: workflowCluster.api.url },
{
workflowCluster && {
key: "WORKFLOWS_RPC_URL",
value: workflowCluster.api.url,
},
workflowCluster && {
key: "WORKFLOWS_RPC_SECRET",
value: secrets.WORKFLOWS_RPC_SECRET.result,
},
{ key: "VERCEL_AWS_ROLE_ARN", value: vercelAwsAccessRole.arn },
].map(
(v) =>
]
.filter(Boolean)
.forEach((_v) => {
const v = _v as NonNullable<typeof _v>;

new vercel.ProjectEnvironmentVariable(`VercelEnv${v.key}`, {
...v,
projectId: vercelProject.id,
Expand All @@ -171,8 +189,8 @@ export default $config({
: undefined,
targets:
$app.stage === "staging" ? undefined : ["preview", "production"],
}),
);
});
});
}

// DiscordBot();
Expand All @@ -193,21 +211,6 @@ function Secrets() {

type Secrets = ReturnType<typeof Secrets>;

// function Planetscale() {
// const org = planetscale.getOrganizationOutput({ name: "cap" });
// const db = planetscale.getDatabaseOutput({
// name: "cap-production",
// organization: org.name,
// });
// const branch = planetscale.getBranchOutput({
// name: $app.stage === "production" ? "main" : "staging",
// database: db.name,
// organization: org.name,
// });

// return { org, db, branch };
// }

// function DiscordBot() {
// new sst.cloudflare.Worker("DiscordBotScript", {
// handler: "../apps/discord-bot/src/index.ts",
Expand Down
43 changes: 43 additions & 0 deletions packages/web-backend/src/Aws.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
import { fromContainerMetadata, fromSSO } from "@aws-sdk/credential-providers";
import type {
AwsCredentialIdentity,
AwsCredentialIdentityProvider,
} from "@smithy/types";
import { awsCredentialsProvider } from "@vercel/functions/oidc";
import { Config, Effect, Option } from "effect";

/**
 * Effect service that resolves AWS credentials from the first available
 * source, in priority order:
 *
 * 1. Static keys (`CAP_AWS_ACCESS_KEY` / `CAP_AWS_SECRET_KEY`) — both must
 *    be present for this source to be used.
 * 2. A Vercel OIDC role (`VERCEL_AWS_ROLE_ARN`), assumed via
 *    `@vercel/functions`' credential provider.
 * 3. In development only, an SSO profile (`AWS_DEFAULT_PROFILE`; when the
 *    variable is unset, the SDK falls back to its default profile).
 * 4. Otherwise, ECS container metadata (the task's IAM role).
 *
 * The resolved value is exposed as `credentials` and may be either a static
 * identity object or a lazy credential provider — both shapes are accepted
 * by AWS SDK v3 clients.
 */
export class AwsCredentials extends Effect.Service<AwsCredentials>()(
	"AwsCredentials",
	{
		effect: Effect.gen(function* () {
			// Read both optional config sources up front so the lookup order
			// (and any config-layer effects) is identical on every run.
			const staticKeys = yield* Config.option(
				Config.all([
					Config.string("CAP_AWS_ACCESS_KEY"),
					Config.string("CAP_AWS_SECRET_KEY"),
				]),
			);
			const roleArn = yield* Config.option(
				Config.string("VERCEL_AWS_ROLE_ARN"),
			);

			let credentials: AwsCredentialIdentity | AwsCredentialIdentityProvider;

			if (Option.isSome(staticKeys)) {
				yield* Effect.log("Using CAP_AWS_ACCESS_KEY and CAP_AWS_SECRET_KEY");
				const [accessKeyId, secretAccessKey] = staticKeys.value;
				credentials = { accessKeyId, secretAccessKey };
			} else if (Option.isSome(roleArn)) {
				yield* Effect.log("Using VERCEL_AWS_ROLE_ARN");
				credentials = awsCredentialsProvider({ roleArn: roleArn.value });
			} else if (process.env.NODE_ENV === "development") {
				yield* Effect.log("Using AWS_DEFAULT_PROFILE");
				credentials = fromSSO({ profile: process.env.AWS_DEFAULT_PROFILE });
			} else {
				yield* Effect.log("Falling back to ECS metadata");
				credentials = fromContainerMetadata();
			}

			return { credentials };
		}),
	},
) {}
Loading