+ >
+ )
+ return (
+
+ )
+}
diff --git a/app/components/HelpGrid/helpgrid.css b/app/components/HelpGrid/helpgrid.css
new file mode 100644
index 00000000..bedb78bb
--- /dev/null
+++ b/app/components/HelpGrid/helpgrid.css
@@ -0,0 +1,11 @@
+.help-grid {
+ display: grid;
+ grid-template-columns: repeat(2, 1fr);
+ gap: var(--spacing-32);
+}
+
+@media (max-width: 780px) {
+ .help-grid {
+ grid-template-columns: 1fr;
+ }
+}
diff --git a/app/components/HelpGrid/index.tsx b/app/components/HelpGrid/index.tsx
new file mode 100644
index 00000000..f0e81975
--- /dev/null
+++ b/app/components/HelpGrid/index.tsx
@@ -0,0 +1,62 @@
+import './helpgrid.css'
+import Card from '../Card'
+
+const helpItems = [
+ {
+ title: 'Start a career in AI Safety',
+ description:
+ 'For both technical and non-technical roles in AI alignment, governance, and field-building',
+ impact: 'Highest direct impact',
+ icon: 'briefcase',
+ route: '/howcanihelppage/career',
+ },
+ {
+ title: 'Spread the word & grassroots activism',
+ description: 'For anyone—help us spread the word about this issue',
+ impact: 'Quickest & most accessible',
+ icon: 'megaphone',
+ route: '/howcanihelppage/grassroots',
+ },
+ {
+ title: 'Donate',
+ description:
+ 'The AI safety field is constrained by funding—financial help is critical at this moment',
+ impact: 'Highest indirect impact',
+ icon: 'piggy-bank',
+ route: '/howcanihelppage/donate',
+ },
+ {
+ title: 'Volunteer',
+ description:
+ 'Help us build important AI safety infrastructure—all skill sets and levels of time-commitment are wanted',
+ impact: 'Best for partial commitment',
+ icon: 'hand-heart',
+ route: '/howcanihelppage/volunteer',
+ },
+ {
+ title: 'Build your knowledge',
+ description:
+ 'Learning about AI safety equips you to effectively contribute to discussions and influence its development',
+ impact: 'Enables impact',
+ icon: 'book',
+ route: '/howcanihelppage/knowledge',
+ },
+ {
+ title: 'Join a community',
+ description:
+ 'Joining a community is motivating, and will help guide your efforts to contribute and get involved',
+ impact: 'Enables impact',
+ icon: 'people',
+ route: '/howcanihelppage/community',
+ },
+]
+
+export default function HelpGrid() {
+ return (
+
+ {helpItems.map((item, index) => (
+
+ ))}
+
+ )
+}
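Note: the render body of HelpGrid above appears to have lost its JSX markup in this copy of the patch. A minimal sketch of what it plausibly contained, assuming Card accepts the helpItems fields (title, description, impact, icon, route) as props and that the map index is used as the key; the exact prop names are an assumption, not the patch author's confirmed markup:

    import './helpgrid.css'
    import Card from '../Card'

    // (helpItems is the array declared earlier in this same file)

    export default function HelpGrid() {
      return (
        <div className="help-grid">
          {helpItems.map((item, index) => (
            <Card
              key={index} // assumed: the visible map callback takes (item, index)
              title={item.title}
              description={item.description}
              impact={item.impact}
              icon={item.icon}
              route={item.route}
            />
          ))}
        </div>
      )
    }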
diff --git a/app/components/HowCanIHelp/Career.tsx b/app/components/HowCanIHelp/Career.tsx
new file mode 100644
index 00000000..f34c3760
--- /dev/null
+++ b/app/components/HowCanIHelp/Career.tsx
@@ -0,0 +1,73 @@
+import CardSmall from '../CardSmall'
+import './career.css'
+
+export default function Career() {
+ return (
+
+
Start a career
+
There are 3 major career paths in AI safety:
+
+
+
+
+
+
+
AI alignment research
+
+
+
What
+
+ AI alignment research is the field dedicated to ensuring that advanced artificial
+ intelligence systems act in ways that are beneficial to humans and aligned with human
+ values and goals. It involves developing methods and principles to guide AI behavior so
+ that as these systems become more capable and autonomous, they continue to operate
+ safely and ethically within the intentions set by their human creators.
+
+
+
+
Why this is important
+
+ To ensure humanity benefits from advanced AI and mitigates risks—like unintended
+ behaviors or misalignment with human values—we must first solve the technical challenge
+ of AI alignment through dedicated research, and then collaborate globally to carefully
+ deploy solutions. While experts believe alignment is solvable, it remains a complex
+ problem that demands significant high-quality intellectual talent.
+
+
+
+
Where AI alignment researchers work
+
+ AI alignment researchers typically work at non-profit organizations dedicated to AI
+ safety and alignment; in academia (i.e. universities and academic institutions);
+ independently; or on industry safety teams*, usually at major AI companies like OpenAI.
+
+
+ *Note: Beware of the risk of joining "safety" teams, as this work often leaks to
+ non-safety parts of the organization which improves the AI technology itself—and so ends
+ up causing harm.
+
+
+
+
You might be a good fit if...
+
+ You might be a good fit as an AI alignment researcher if you have a quantitative
+ background, you enjoy programming, or you're skilled at breaking down problems
+ logically, hypothesizing, and testing various solutions with high attention to detail.
+
+ )
+}
diff --git a/app/components/HelpGrid/index.tsx b/app/components/HelpGrid/index.tsx
index f0e81975..da7bebd2 100644
--- a/app/components/HelpGrid/index.tsx
+++ b/app/components/HelpGrid/index.tsx
@@ -1,5 +1,6 @@
import './helpgrid.css'
import Card from '../Card'
+import {Briefcase, Megaphone, PiggyBank, Hand, Book, People} from '../icons-generated'
const helpItems = [
{
@@ -7,14 +8,14 @@ const helpItems = [
description:
'For both technical and non-technical roles in AI alignment, governance, and field-building',
impact: 'Highest direct impact',
- icon: 'briefcase',
+ icon: Briefcase,
route: '/howcanihelppage/career',
},
{
title: 'Spread the word & grassroots activism',
description: 'For anyone—help us spread the word about this issue',
impact: 'Quickest & most accessible',
- icon: 'megaphone',
+ icon: Megaphone,
route: '/howcanihelppage/grassroots',
},
{
@@ -22,7 +23,7 @@ const helpItems = [
description:
'The AI safety field is constrained by funding—financial help is critical at this moment',
impact: 'Highest indirect impact',
- icon: 'piggy-bank',
+ icon: PiggyBank,
route: '/howcanihelppage/donate',
},
{
@@ -30,7 +31,7 @@ const helpItems = [
description:
'Help us build important AI safety infrastructure—all skill sets and levels of time-commitment are wanted',
impact: 'Best for partial commitment',
- icon: 'hand-heart',
+ icon: Hand,
route: '/howcanihelppage/volunteer',
},
{
@@ -38,7 +39,7 @@ const helpItems = [
description:
'Learning about AI safety equips you to effectively contribute to discussions and influence its development',
impact: 'Enables impact',
- icon: 'book',
+ icon: Book,
route: '/howcanihelppage/knowledge',
},
{
@@ -46,7 +47,7 @@ const helpItems = [
description:
'Joining a community is motivating, and will help guide your efforts to contribute and get involved',
impact: 'Enables impact',
- icon: 'people',
+ icon: People,
route: '/howcanihelppage/community',
},
]
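The hunk above swaps string icon names for React components imported from icons-generated. A minimal sketch of how Card's props could be typed to accept such a component; the CardProps shape and markup below are assumptions for illustration, not the project's actual Card source:

    import type {ComponentType, SVGProps} from 'react'

    // Hypothetical prop shape for Card after the icon refactor above.
    type CardProps = {
      title: string
      description: string
      impact: string
      icon: ComponentType<SVGProps<SVGSVGElement>> // assumed: icons-generated exports SVG React components
      route: string
    }

    export default function Card({title, description, impact, icon: Icon, route}: CardProps) {
      return (
        <a className="card" href={route}>
          <Icon /> {/* render the passed component directly instead of looking up a string name */}
          <h3>{title}</h3>
          <p>{description}</p>
          <span className="card-impact">{impact}</span>
        </a>
      )
    }

Passing the component itself avoids a string-to-icon lookup table inside Card and lets the TypeScript compiler catch a misspelled icon name at build time.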
diff --git a/app/components/HowCanIHelp/Career.tsx b/app/components/HowCanIHelp/Career.tsx
index f34c3760..e2460672 100644
--- a/app/components/HowCanIHelp/Career.tsx
+++ b/app/components/HowCanIHelp/Career.tsx
@@ -1,71 +1,477 @@
+import Card from '../Card'
import CardSmall from '../CardSmall'
+import DropDown from '../DropDown'
+import {Microscope, GovermentBuilding, PuzzlePieces} from '../icons-generated'
+import LinkCard from '../LinkCard'
import './career.css'
export default function Career() {
return (
Start a career
-
There are 3 major career paths in AI safety:
+
There are 3 major career paths in AI safety:
-
-
AI alignment research
-
-
-
What
-
- AI alignment research is the field dedicated to ensuring that advanced artificial
- intelligence systems act in ways that are beneficial to humans and aligned with human
- values and goals. It involves developing methods and principles to guide AI behavior so
- that as these systems become more capable and autonomous, they continue to operate
- safely and ethically within the intentions set by their human creators.
-
+
+
AI alignment research
+
+
+
What
+
+ AI alignment research is the field dedicated to ensuring that advanced artificial
+ intelligence systems act in ways that are beneficial to humans and aligned with human
+ values and goals. It involves developing methods and principles to guide AI behavior
+ so that as these systems become more capable and autonomous, they continue to operate
+ safely and ethically within the intentions set by their human creators.
+
+
+
+
Why this is important
+
+ To ensure humanity benefits from advanced AI and mitigates risks—like unintended
+ behaviors or misalignment with human values—we must first solve the technical
+ challenge of AI alignment through dedicated research, and then collaborate globally to
+ carefully deploy solutions. While experts believe alignment is solvable, it remains a
+ complex problem that demands significant high-quality intellectual talent.
+
+
+
+
Where AI alignment researchers work
+
+ AI alignment researchers typically work at non-profit organizations dedicated to AI
+ safety and alignment; in academia (i.e. universities and academic institutions);
+ independently; or on industry safety teams*, usually at major AI companies like
+ OpenAI.
+
+
+ *Note: Beware of the risk of joining "safety" teams, as this work often leaks to
+ non-safety parts of the organization which improves the AI technology itself—and so
+ ends up causing harm.
+
+
+
+
You might be a good fit if...
+
+ You might be a good fit as an AI alignment researcher if you have a quantitative
+ background, you enjoy programming, or you're skilled at breaking down problems
+ logically, hypothesizing, and testing various solutions with high attention to detail.
+
+
-
-
Why this is important
-
- To ensure humanity benefits from advanced AI and mitigates risks—like unintended
- behaviors or misalignment with human values—we must first solve the technical challenge
- of AI alignment through dedicated research, and then collaborate globally to carefully
- deploy solutions. While experts believe alignment is solvable, it remains a complex
- problem that demands significant high-quality intellectual talent.
+
+
+
+
Interested in pursuing this career path?
+
+ Take the following steps to (1) learn more & further assess your fit; (2) learn how to
+ make the transition
-
-
Where AI alignment researchers work
-
- AI alignment researchers typically work at non-profit organizations dedicated to AI
- safety and alignment; in academia (i.e. universities and academic institutions);
- independently; or on industry safety teams*, usually at major AI companies like OpenAI.
-
-
- *Note: Beware of the risk of joining "safety" teams, as this work often leaks to
- non-safety parts of the organization which improves the AI technology itself—and so ends
- up causing harm.
-
+
+
+
+ Read the 80,000 Hours Technical AI Safety Career Review
+
+
+
The review takes about one hour and addresses:
+
+
What this career path involves
+
How to predict your fit
+
The upsides and downsides of this career path
+
Compensation
+
How to enter or transition into this career
+
+
+
+
-
-
You might be a good fit if...
-
- You might be a good fit as an AI alignment researcher if you have a quantitative
- background, you enjoy programming, or you're skilled at breaking down problems
- logically, hypothesizing, and testing various solutions with high attention to detail.
-
+
+
+
+ Sign up for 1-on-1 career advice with AI Safety Quest & 80,000 Hours (free)
+
+
+ Schedule a 30-minute or 1-hour video call—we recommend booking both! These calls will
+ address your specific questions about the field, confirm your interest and fit, and
+ provide tailored recommendations to help you make the transition.
+
+
+ Note: 80,000 Hours does not accept all applicants.
+
+
+
+
+
+
+
+
+
+
+
+ A process note: Form your own understanding of the AI alignment technical challenge
+
+
+ AI safety is a relatively new field with diverse opinions on how best to solve the
+ technical challenge of AI alignment. Many unexplored avenues and important questions
+ likely remain unaddressed. Therefore, it's crucial for (aspiring) AI alignment
+ researchers to think independently and develop their own models on this topic. If you
+ pursue a career in this field, we recommend deeply educating yourself on the technical
+ challenge of alignment, engaging with other AI safety experts, and thinking critically
+ about the topic and current paradigms.
+
+
+
+
+
+
+
+
+
+
+
+
+
+ There are many roles that support the work of AI alignment researchers, and having
+ high-performing people in these roles is crucial. In a research organisation,
+ around half of the staff will be doing other tasks essential for the organisation
+ to perform at its best and have an impact. Some of these roles include:
+
+
+
+
+
+ Operations management at an AI safety research organization
+
+
+ This involves overseeing the day-to-day activities that enable the organization
+ to function efficiently and effectively. Responsibilities may include
+ administrative support, resource allocation, HR, management of facilities, IT
+ support, project coordination, etc.
+
+
+
+
+
+
+
+
+
+ Research management at an AI safety research organization
+
+
+ This involves overseeing and coordinating research activities to ensure they
+ align with the mission of promoting safe AI development. Responsibilities
+ include setting research priorities, managing teams, allocating resources,
+ fostering collaboration, monitoring progress, and upholding ethical standards.
+
+
+
+
+
+
+
+
+
+ Being an executive assistant to an AI safety researcher
+
+
+ This involves managing administrative tasks to enhance this person’s
+ productivity. Responsibilities include scheduling meetings, handling
+ correspondence, coordinating travel, organizing events, and ensuring they can
+ focus on impactful AI safety efforts.
+
+
+
+
+
+
+
+ }
+ />
+
+
+
AI governance & policy
+
+
+
What
+
+ AI alignment research is the field dedicated to ensuring that advanced artificial
+ intelligence systems act in ways that are beneficial to humans and aligned with human
+ values and goals. It involves developing methods and principles to guide AI behavior
+ so that as these systems become more capable and autonomous, they continue to operate
+ safely and ethically within the intentions set by their human creators.
+
+
+
+
Why this is important
+
+ To ensure humanity benefits from advanced AI and mitigates risks—like unintended
+ behaviors or misalignment with human values—we must first solve the technical
+ challenge of AI alignment through dedicated research, and then collaborate globally to
+ carefully deploy solutions. While experts believe alignment is solvable, it remains a
+ complex problem that demands significant high-quality intellectual talent.
+
+
+
+
Where AI alignment researchers work
+
+ AI alignment researchers typically work at non-profit organizations dedicated to AI
+ safety and alignment; in academia (i.e. universities and academic institutions);
+ independently; or on industry safety teams*, usually at major AI companies like
+ OpenAI.
+
+
+
+
You might be a good fit if...
+
+ You might be a good fit as an AI alignment researcher if you have a quantitative
+ background, you enjoy programming, or you're skilled at breaking down problems
+ logically, hypothesizing, and testing various solutions with high attention to detail.
+
+
+
+
+
+
AI safety field-building
+
+
+
What
+
+ AI alignment research is the field dedicated to ensuring that advanced artificial
+ intelligence systems act in ways that are beneficial to humans and aligned with human
+ values and goals. It involves developing methods and principles to guide AI behavior
+ so that as these systems become more capable and autonomous, they continue to operate
+ safely and ethically within the intentions set by their human creators.
+
+
+
+
Why this is important
+
+ To ensure humanity benefits from advanced AI and mitigates risks—like unintended
+ behaviors or misalignment with human values—we must first solve the technical
+ challenge of AI alignment through dedicated research, and then collaborate globally to
+ carefully deploy solutions. While experts believe alignment is solvable, it remains a
+ complex problem that demands significant high-quality intellectual talent.
+
+
+
+
You might be a good fit if...
+
+ You might be a good fit as an AI alignment researcher if you have a quantitative
+ background, you enjoy programming, or you're skilled at breaking down problems
+ logically, hypothesizing, and testing various solutions with high attention to detail.
+
+
+
+
+
+
Most common field-building roles
+
+
+
Communications & advocacy
+
+ Communications involves educating the public or spreading the word about AI
+ safety—most typically through websites or social media. People with computer skills
+ and creative skills can typically find a place within communications. More
+ specifically, roles could include independent content production, software
+ engineering, project management, or design.
+
+
+
+
+
+
+
+
+
Being a grantmaker
+
+ There are many philanthropists interested in donating millions of dollars to AI
+ safety—but there currently aren’t enough grantmakers able to vet funding proposals.
+ Because a randomly chosen proposal has little expected impact, grantmakers can have a
+ large impact by helping philanthropists distinguish promising projects in AI safety
+ from less promising ones.
+
+
+
+
+
+
+
+
+
Founding new projects
+
+ Founding a new project in AI safety involves identifying a gap in a pressing problem
+ area, formulating a solution, investigating it, and then helping to build an
+ organisation by investing in strategy, hiring, management, culture, and so on—ideally
+ building something that can continue without you.
+
+
+
+
+
+
+
+
+
+
+
Supporting roles
+
+
+ There are many roles that support the work of AI alignment researchers, and
+ having high-performing people in these roles is crucial. In a research
+ organisation, around half of the staff will be doing other tasks essential for
+ the organisation to perform at its best and have an impact. Some of these roles
+ include:
+
+
+
+
+
+ Operations management at an AI safety research organization
+
+
+ This involves overseeing the day-to-day activities that enable the
+ organization to function efficiently and effectively. Responsibilities may
+ include administrative support, resource allocation, HR, management of
+ facilities, IT support, project coordination, etc.
+
+
+
+
+
+
+
+
+
+ Research management at an AI safety research organization
+
+
+ This involves overseeing and coordinating research activities to ensure they
+ align with the mission of promoting safe AI development. Responsibilities
+ include setting research priorities, managing teams, allocating resources,
+ fostering collaboration, monitoring progress, and upholding ethical standards.
+
+
+
+
+
+
+
+
+
+ Being an executive assistant to an AI safety researcher
+
+
+ This involves managing administrative tasks to enhance this person’s
+ productivity. Responsibilities include scheduling meetings, handling
+ correspondence, coordinating travel, organizing events, and ensuring they can
+ focus on impactful AI safety efforts.
+
+
+
+
+
+
+
+
+
Other technical roles
+
+
+
+ Working in information security to protect AI (or the results of key
+ experiments) from misuse, theft, or tampering
+
+
+ Lorem ipsum dolor sit amet consectetur. Sapien ullamcorper morbi habitasse
+ justo magna. Suspendisse nunc id lacus sit interdum sit.
+
+
+
+
+
+
+
+
+
+ Becoming an expert in AI hardware as a way of steering AI progress in safer
+ directions
+
+
+ Lorem ipsum dolor sit amet consectetur. Sapien ullamcorper morbi habitasse
+ justo magna. Suspendisse nunc id lacus sit interdum sit.
+
+
+
+
+
+
+
+
+ }
+ />
+
+
+
+ Multiply your impact: Support your career pursuit
+
- Working in information security to protect AI (or the results of key
+ Working in information security to protect AI (or the results of key
experiments) from misuse, theft, or tampering
@@ -417,7 +417,7 @@ export default function Career() {
- Becoming an expert in AI hardware as a way of steering AI progress in safer
+ Becoming an expert in AI hardware as a way of steering AI progress in safer
directions
@@ -426,7 +426,7 @@ export default function Career() {
-
+
@@ -462,7 +462,7 @@ export default function Career() {
/>
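A design note on the Career.tsx content above: the What / Why / Where / "good fit" block is repeated three times, and the governance and field-building sections still reuse the alignment-research copy as a placeholder. If that copy is later made data-driven, one hypothetical shape for it (a sketch only, not part of this patch) could be:

    // Hypothetical sketch, not part of this patch: the repeated per-path sections
    // captured as data so the copy is written once per career path.
    type CareerPath = {
      title: string
      what: string
      why: string
      where?: string // the governance and field-building sections above omit this block
      goodFitIf: string
    }

    const careerPaths: CareerPath[] = [
      {
        title: 'AI alignment research',
        what: 'Ensuring advanced AI systems act in line with human values and goals.',
        why: 'Alignment is believed solvable but still demands significant high-quality talent.',
        where: 'AI safety non-profits, academia, independent research, or industry safety teams.',
        goodFitIf: 'You have a quantitative background and enjoy breaking problems down logically.',
      },
      // 'AI governance & policy' and 'AI safety field-building' would follow the same shape
      // once their placeholder copy is replaced.
    ]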
From e1d44f5c9fe8a3cf4f2f3bc388e0d246c42af15e Mon Sep 17 00:00:00 2001
From: Olivier Coutu
Date: Tue, 15 Oct 2024 01:08:39 -0400
Subject: [PATCH 05/12] First draft of Knowledge.tsx
---
app/components/HowCanIHelp/Knowledge.tsx | 239 ++++++++++++++++++++++-
1 file changed, 237 insertions(+), 2 deletions(-)
diff --git a/app/components/HowCanIHelp/Knowledge.tsx b/app/components/HowCanIHelp/Knowledge.tsx
index fa5cfbce..51c494d1 100644
--- a/app/components/HowCanIHelp/Knowledge.tsx
+++ b/app/components/HowCanIHelp/Knowledge.tsx
@@ -1,10 +1,245 @@
+import Card from '../Card'
+import CardSmall from '../CardSmall'
+import DropDown from '../DropDown'
+import {Microscope, GovermentBuilding, PuzzlePieces} from '../icons-generated'
+import LinkCard from '../LinkCard'
import './knowledge.css'
export default function Knowledge() {
return (
-
Share knowledge about AI Safety
- {/* Add content for the Knowledge page */}
+
Build your knowledge
+
If you're somewhat new to AI safety, we recommend an introductory overview
+
+
+
+
+
Browse our introductory content
+
+ Our website’s “Intro to AI Safety” micro-course includes several short readings that act as a
+ comprehensive introduction to the topic of AI safety.
+
+
+ Our Intro to AI Safety video playlist illustrates many of the most important points about AI safety
+ in a way that is entertaining and easy to understand.
+
+
+
+
+
+
+ TODO Intro to AI Safety video playlist
+
+
+
+
+
+
+
+
+
Listen to an introductory podcast episode (or a few)
+
+ We recommend Dwarkesh Patel’s interview with Paul Christiano, a leading researcher in AI alignment and safety.
+ The interview provides an introduction to AI risk and discusses many important AI safety concepts.
+
+
+
+
+
+
+ Browse our full list of podcasts (TODO link)
+
+
+
+
+
+
+
+
I love books!
+
+
+ TODO I love books I'm a nerd
+ Melissa, what do we want to put in the modal, why not just link to this?
+ https://aisafety.info/questions/8159/What-are-some-good-books-about-AI-safety
+
+
+
+
+
+
+ Lorem ipsum
+
+
+ Lorem ipsum
+
+
+
+
+
+ }
+ />
+ {/* TODO How do I avoid the double divider line? */}
+
+
+
I love books!
+
+
+ TODO idk what goes here
+
+
+
+
+
+
+ Lorem ipsum
+
+
+ Lorem ipsum
+
+
+
+
+
+ }
+ />
+
+
+
If you want to dive deeper
+
+
+
+
Take an online course
+
+ We recommend taking an online course if your interests have narrowed to a specific subset of
+ AI safety, such as AI alignment research or AI governance.
+
+
+ The AI Safety Fundamentals (AISF) Governance Course, for example, is especially suited
+ for policymakers and similar stakeholders interested in AI governance mechanisms.
+ It explores policy levers for steering the future of AI development.
+
+
+ The AISF Alignment Course is especially suited for people with a technical background
+ interested in AI alignment research. It explores research agendas for aligning AI systems with human interests.
+
+
+ Note: If you take the AISF courses, consider exploring additional views on AI safety to help avoid homogeneity
+ in the field, such as The Most Important Century blog post series.
+
+
+ Note: AISF courses do not accept all applicants, but we still recommend taking
+ the courses through self-study if your application is unsuccessful.
+
Get into LessWrong and its subset, the Alignment Forum
+
+ Most people who are really into AI existential safety ultimately end up in this online,
+ forum-based community which fosters high-quality discussions about AI safety research and governance.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Sign up for events
+
+ Events, typically conferences and talks, are often held in person and last one to three days.
+
+
+ We've highlighted EAGx, an Effective Altruism conference dedicated to networking and learning
+ about important global issues, with a strong focus on AI safety. Several EAGx conferences are held
+ annually in various major cities across the world.
+
+ AI safety fellowships typically last one to three weeks and are offered both online and in person.
+ They focus on developing safe and ethical AI practices through research,
+ mentorship, and collaboration on innovative solutions.
+
- The AI safety movement is still relatively new, and your actions could have significant
- impact. Here's what you can do:
+ The AI movement is still relatively new, and your actions could have significant impact.
+ Here's what you can do:
- The AI movement is still relatively new, and your actions could have significant impact.
- Here's what you can do:
+ The AI safety movement is still relatively new, and your actions could have significant
+ impact. Here's what you can do:
If you're somewhat new to AI safety, we recommend an introductory overview
+
+ If you're somewhat new to AI safety, we recommend an introductory overview
+
-
Browse our introductory content
-
- Our website’s “Intro to AI Safety” micro-course includes several short readings that act as a
- comprehensive introduction the topic of AI safety.
-
-
- Our Intro to AI Safety video playlist illustrates many of the most important points about AI safety
- in a way that is entertaining and easy to understand.
-
+
Browse our introductory content
+
+ Our website’s “Intro to AI Safety” micro-course includes several short readings that
+ act as a comprehensive introduction to the topic of AI safety.
+
+
+ Our Intro to AI Safety video playlist illustrates many of the most important points
+ about AI safety in a way that is entertaining and easy to understand.
+
-
- TODO Intro to AI Safety video playlist
-
+
TODO Intro to AI Safety video playlist
-
-
Listen to an introductory podcast episode (or a few)
-
- We recommend Dwarkesh Patel’s interview with Paul Christiano, a leading researcher in AI alignment and safety.
- The interview provides an introduction to AI risk and discusses many important AI safety concepts.
-
+
+ Listen to an introductory podcast episode (or a few)
+
+
+ We recommend Dwarkesh Patel’s interview with Paul Christiano, a leading researcher in
+ AI alignment and safety. The interview provides an introduction to AI risk and
+ discusses many important AI safety concepts.
+
-
+
-
- Browse our full list of podcasts (TODO link)
-
+
Browse our full list of podcasts (TODO link)
-
+
I love books!
- TODO I love books I'm a nerd
- Melissa, what do we want to put in the modal, why not just link to this?
+ TODO I love books I'm a nerd Melissa, what do we want to put in the modal, why
+ not just link to this?
https://aisafety.info/questions/8159/What-are-some-good-books-about-AI-safety
-
+
-
- Lorem ipsum
-
-
- Lorem ipsum
-
+
Lorem ipsum
+
Lorem ipsum
@@ -89,19 +88,13 @@ export default function Knowledge() {
I love books!
-
- TODO idk what goes here
-
+
TODO idk what goes here
-
+
-
- Lorem ipsum
-
-
- Lorem ipsum
-
+
Lorem ipsum
+
Lorem ipsum
@@ -114,28 +107,30 @@ export default function Knowledge() {
-
Take an online course
-
- We recommend taking an online course if your interests have narrowed to a specific subset of
- AI safety, such as AI alignment research or AI governance.
-
-
- The AI Safety Fundamentals (AISF) Governance Course, for example, is especially suited
- for policymakers and similar stakeholders interested in AI governance mechanisms.
- It explores policy levers for steering the future of AI development.
-
-
- The AISF Alignment Course is especially suited for people with a technical background
- interested in AI alignment research. It explores research agendas for aligning AI systems with human interests.
-
-
- Note: If you take the AISF courses, consider exploring additional views on AI safety to help avoid homogeneity
- in the field, such as The Most Important Century blog post series.
-
-
- Note: AISF courses do not accept all applicants, but we still recommend taking
- the courses through self-study if your application is unsuccessful.
-
+
Take an online course
+
+ We recommend taking an online course if your interests have narrowed to a specific
+ subset of AI safety, such as AI alignment research or AI governance.
+
+
+ The AI Safety Fundamentals (AISF) Governance Course, for example, is especially suited
+ for policymakers and similar stakeholders interested in AI governance mechanisms. It
+ explores policy levers for steering the future of AI development.
+
+
+ The AISF Alignment Course is especially suited for people with a technical background
+ interested in AI alignment research. It explores research agendas for aligning AI
+ systems with human interests.
+
+
+ Note: If you take the AISF courses, consider exploring additional views on AI safety
+ to help avoid homogeneity in the field, such as The Most Important Century blog post
+ series.
+
+
+ Note: AISF courses do not accept all applicants, but we still recommend taking the
+ courses through self-study if your application is unsuccessful.
+
@@ -143,7 +138,13 @@ export default function Knowledge() {
@@ -151,55 +152,70 @@ export default function Knowledge() {
-
Get into Lesswrong and its subset, the Alignment Forum
-
- Most people who are really into AI existential safety ultimately end up in this online,
- forum-based community which fosters high-quality discussions about AI safety research and governance.
-
+
+ Get into LessWrong and its subset, the Alignment Forum
+
+
+ Most people who are really into AI existential safety ultimately end up in this
+ online, forum-based community which fosters high-quality discussions about AI safety
+ research and governance.
+
-
+
-
+
-
Sign up for events
-
- Events, typically conferences and talks, are often held in person and last one to three days.
-
-
- We've highlighted EAGx, an Effective Altruism conference dedicated to networking and learning
- about important global issues, with a strong focus on AI safety. Several EAGx's are held
- annually in various major cities across the world.
-
+
Sign up for events
+
+ Events, typically conferences and talks, are often held in person and last one to
+ three days.
+
+
+ We've highlighted EAGx, an Effective Altruism conference dedicated to networking and
+ learning about important global issues, with a strong focus on AI safety. Several
+ EAGx conferences are held annually in various major cities across the world.
+
- AI safety fellowships typically last one to three weeks and are offered both online and in person.
- They focus on developing safe and ethical AI practices through research,
- mentorship, and collaboration on innovative solutions.
-
+
Sign up for fellowships
+
+ AI safety fellowships typically last one to three weeks and are offered both online
+ and in person. They focus on developing safe and ethical AI practices through
+ research, mentorship, and collaboration on innovative solutions.
+
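For context on how these pages are likely served: the /howcanihelppage/* paths used throughout the components suggest thin route modules that just render them. A hypothetical example, assuming a Remix-style app/routes convention (the app/ directory layout points that way); the filename and the ~ import alias are assumptions:

    // app/routes/howcanihelppage.knowledge.tsx (hypothetical filename)
    import Knowledge from '~/components/HowCanIHelp/Knowledge'

    // The route module only mounts the component; all content lives in Knowledge.tsx.
    export default function KnowledgeRoute() {
      return <Knowledge />
    }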
From 5550195d0bee0260035ec40e1c7a34d3e656f82c Mon Sep 17 00:00:00 2001
From: Olivier Coutu
Date: Fri, 18 Oct 2024 08:48:45 -0400
Subject: [PATCH 10/12] Update card route
---
app/components/HowCanIHelp/Knowledge.tsx | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/app/components/HowCanIHelp/Knowledge.tsx b/app/components/HowCanIHelp/Knowledge.tsx
index d4d877c3..2eee6199 100644
--- a/app/components/HowCanIHelp/Knowledge.tsx
+++ b/app/components/HowCanIHelp/Knowledge.tsx
@@ -237,14 +237,14 @@ export default function Knowledge() {
title="Start a career in AI Safety"
description="Help us build important AI safety infrastructure—all skill sets and levels of time-commitment are wanted"
icon={PuzzlePieces}
- route="/howcanihelppage/career"
+ action="/howcanihelppage/career"
impact={'TODO'}
/>
@@ -253,14 +253,14 @@ export default function Knowledge() {
title="Donate"
description="The AI safety field is constrained by funding—financial help is critical at this moment"
icon={PuzzlePieces}
- route="/howcanihelppage/donate"
+ action="/howcanihelppage/donate"
impact={'TODO'}
/>
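These hunks rename the prop passed to Card from route to action while keeping path strings as values. Relative to the CardProps sketch given after the icon refactor earlier, the assumed change is a single field rename; again an assumption about Card's API, not its actual source:

    import type {ComponentType} from 'react'

    // Hypothetical Card props after the rename above.
    type CardProps = {
      title: string
      description: string
      icon: ComponentType
      impact: string
      action: string // was `route`; still a navigation path such as '/howcanihelppage/donate'
    }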
From 0503a37a03b1a2f7703c503308b9f4b4007b19fc Mon Sep 17 00:00:00 2001
From: Olivier Coutu
Date: Fri, 18 Oct 2024 09:05:37 -0400
Subject: [PATCH 11/12] Remove unused imports
---
app/components/HowCanIHelp/Knowledge.tsx | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/app/components/HowCanIHelp/Knowledge.tsx b/app/components/HowCanIHelp/Knowledge.tsx
index 2eee6199..5205e418 100644
--- a/app/components/HowCanIHelp/Knowledge.tsx
+++ b/app/components/HowCanIHelp/Knowledge.tsx
@@ -1,7 +1,6 @@
import Card from '../Card'
-import CardSmall from '../CardSmall'
import DropDown from '../DropDown'
-import {Microscope, GovermentBuilding, PuzzlePieces} from '../icons-generated'
+import {PuzzlePieces} from '../icons-generated'
import LinkCard from '../LinkCard'
import './knowledge.css'
From fd24831314678e039147fa9789bc88540b0c86c8 Mon Sep 17 00:00:00 2001
From: Olivier Coutu
Date: Fri, 18 Oct 2024 09:17:02 -0400
Subject: [PATCH 12/12] Update CSS importation
---
app/components/HowCanIHelp/Knowledge.tsx | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/app/components/HowCanIHelp/Knowledge.tsx b/app/components/HowCanIHelp/Knowledge.tsx
index 5205e418..6065f8a4 100644
--- a/app/components/HowCanIHelp/Knowledge.tsx
+++ b/app/components/HowCanIHelp/Knowledge.tsx
@@ -2,7 +2,7 @@ import Card from '../Card'
import DropDown from '../DropDown'
import {PuzzlePieces} from '../icons-generated'
import LinkCard from '../LinkCard'
-import './knowledge.css'
+import './howcanihelp.css'
export default function Knowledge() {
return (