diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 5626db58938118068a0fd0c6ad8c587ae87dc950..e5a7ef5879d405a3d35291f3124ed13be79d4237 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -678,6 +678,125 @@ bool RISCVDAGToDAGISel::tryShrinkShlLogicImm(SDNode *Node) {
   return true;
 }
 
+/// isInt32Immediate - This method tests to see if the node is a 32-bit constant
+/// operand. If so Imm will receive the 32-bit value.
+static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
+  if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i32) {
+    Imm = cast<ConstantSDNode>(N)->getZExtValue();
+    return true;
+  }
+  return false;
+}
+
+// isInt32Immediate - This method tests to see if the operand is a 32-bit
+// constant. If so Imm will receive the 32 bit value.
+static bool isInt32Immediate(SDValue N, unsigned &Imm) {
+  return isInt32Immediate(N.getNode(), Imm);
+}
+
+// isOpcWithIntImmediate - This method tests to see if the node is a specific
+// opcode and that it has an immediate integer right operand.
+// If so Imm will receive the 32 bit value.
+static bool isOpcWithIntImmediate(SDNode *N, unsigned Opc, unsigned& Imm) {
+  return N->getOpcode() == Opc
+         && isInt32Immediate(N->getOperand(1).getNode(), Imm);
+}
+
+bool RISCVDAGToDAGISel::tryXCVbitmanipExtractOp(SDNode *N, bool IsSigned) {
+  if (!Subtarget->hasExtXcvbitmanip())
+    return false;
+  unsigned Opc = IsSigned ? RISCV::CV_EXTRACT : RISCV::CV_EXTRACTU;
+  SDLoc DL(N);
+  MVT XLenVT = Subtarget->getXLenVT();
+  MVT VT = N->getSimpleValueType(0);
+
+  // For unsigned extracts, check for a shift right and mask
+  unsigned AndImm = 0;
+  if (N->getOpcode() == ISD::AND) {
+    if (isOpcWithIntImmediate(N, ISD::AND, AndImm)) {
+
+      // The immediate is a mask of the low bits iff imm & (imm+1) == 0
+      if (AndImm & (AndImm + 1))
+        return false;
+
+      unsigned Srl_imm = 0;
+      if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRL,
+                                Srl_imm)) {
+        assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
+
+        // Mask off the unnecessary bits of the AND immediate; normally
+        // DAGCombine will do this, but that might not happen if
+        // targetShrinkDemandedConstant chooses a different immediate.
+        AndImm &= -1U >> Srl_imm;
+
+        // Note: The width operand is encoded as width-1.
+        unsigned Width = countTrailingOnes(AndImm) - 1;
+        unsigned LSB = Srl_imm;
+
+        if ((LSB + Width + 1) == N->getValueType(0).getSizeInBits()) {
+          Opc = IsSigned ? RISCV::SRA : RISCV::SRL;
+          SDNode *NewNode = CurDAG->getMachineNode(
+            Opc, DL, VT, N->getOperand(0).getOperand(0));
+          ReplaceNode(N, NewNode);
+          return true;
+        }
+
+        assert(LSB + Width + 1 <= 32 && "cv.extract width will get shrunk");
+        SDNode *NewNode = CurDAG->getMachineNode(
+          Opc, DL, VT, N->getOperand(0).getOperand(0),
+          CurDAG->getTargetConstant(Width, DL, XLenVT),
+          CurDAG->getTargetConstant(LSB, DL, XLenVT));
+        ReplaceNode(N, NewNode);
+        return true;
+      }
+    }
+    return false;
+  }
+
+  // Otherwise, we're looking for a shift of a shift
+  unsigned Shl_imm = 0;
+  if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) {
+    assert(Shl_imm > 0 && Shl_imm < 32 && "bad amount in shift node!");
+    unsigned Srl_imm = 0;
+    if (isInt32Immediate(N->getOperand(1), Srl_imm)) {
+      assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
+      unsigned Width = 32 - Srl_imm - 1;
+      int LSB = Srl_imm - Shl_imm;
+      if (LSB < 0)
+        return false;
+      assert(LSB + Width + 1 <= 32 && "cv.extract width will get shrunk");
+      SDNode *NewNode = CurDAG->getMachineNode(
+        Opc, DL, VT, N->getOperand(0).getOperand(0),
+        CurDAG->getTargetConstant(Width, DL, XLenVT),
+        CurDAG->getTargetConstant(LSB, DL, XLenVT));
+      ReplaceNode(N, NewNode);
+      return true;
+    }
+  }
+
+  // Or we are looking for a shift of an and, with a mask operand
+  if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, AndImm) &&
+      isShiftedMask_32(AndImm)) {
+    unsigned Srl_imm = 0;
+    unsigned LSB = countTrailingZeros(AndImm);
+    // Shift amount must be the same as the and's LSB.
+    if (isInt32Immediate(N->getOperand(1), Srl_imm) && Srl_imm == LSB) {
+      assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
+      unsigned MSB = 31 - countLeadingZeros(AndImm);
+      unsigned Width = MSB - LSB;
+      assert(Srl_imm + Width + 1 <= 32 && "cv.extract width will get shrunk");
+      SDNode *NewNode = CurDAG->getMachineNode(
+        Opc, DL, VT, N->getOperand(0).getOperand(0),
+        CurDAG->getTargetConstant(Width, DL, XLenVT),
+        CurDAG->getTargetConstant(Srl_imm, DL, XLenVT));
+      ReplaceNode(N, NewNode);
+      return true;
+    }
+  }
+
+  return false;
+}
+
 void RISCVDAGToDAGISel::Select(SDNode *Node) {
   // If we have a custom node, we have already selected.
   if (Node->isMachineOpcode()) {
@@ -717,6 +836,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
     return;
   }
   case ISD::SHL: {
+    if (tryXCVbitmanipExtractOp(Node, false))
+      return;
     auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
     if (!N1C)
       break;
@@ -747,6 +868,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
     break;
   }
   case ISD::SRL: {
+    if (tryXCVbitmanipExtractOp(Node, false))
+      return;
     auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
     if (!N1C)
       break;
@@ -819,6 +942,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
     return;
   }
   case ISD::SRA: {
+    if (tryXCVbitmanipExtractOp(Node, true))
+      return;
     // Optimize (sra (sext_inreg X, i16), C) ->
     //          (srai (slli X, (XLen-16), (XLen-16) + C)
     // And      (sra (sext_inreg X, i8), C) ->
@@ -856,6 +981,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
 
     break;
   case ISD::AND: {
+    if (tryXCVbitmanipExtractOp(Node, false))
+      return;
     auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
     if (!N1C)
       break;
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
index 15e1a1ef36cf86b76f38ce016d1081fb44e1fd70..24ab2470488ab58d57d916fb53abff9fee51f866 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -53,6 +53,8 @@ public:
 
   bool tryShrinkShlLogicImm(SDNode *Node);
 
+  bool tryXCVbitmanipExtractOp(SDNode *N, bool IsSigned);
+
   bool selectShiftMask(SDValue N, unsigned ShiftWidth, SDValue &ShAmt);
   bool selectShiftMaskXLen(SDValue N, SDValue &ShAmt) {
     return selectShiftMask(N, Subtarget->getXLen(), ShAmt);
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index a40eea7aee56e61cf19bcc72209036c0c3a20694..79390ee0f5b27eb2ccd45e53184ed8dea365e10f 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -432,6 +432,10 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
   }
 
+  if (Subtarget.hasExtXcvbitmanip()) {
+    setOperationAction(ISD::CTPOP, XLenVT, Legal);
+  }
+
   if (Subtarget.hasExtXcvmem()) {
     setIndexedLoadAction(ISD::POST_INC, MVT::i8, Legal);
     setIndexedLoadAction(ISD::POST_INC, MVT::i16, Legal);
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoCOREV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoCOREV.td
index 7ba6294de0163f6edee72035b964c07bb0965d7c..41c7daaa45d3bbeeca12ccfa0d8bbc4296b18e69 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoCOREV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoCOREV.td
@@ -1226,6 +1226,7 @@ let Predicates = [HasExtXcvbitmanip, IsRV32] in {
   
   def : Pat<(int_riscv_cv_bitmanip_bitrev GPR:$rs1, cv_tuimm5:$pts, cv_tuimm2:$radix),
             (CV_BITREV GPR:$rs1, cv_tuimm2:$radix, cv_tuimm5:$pts)>;
+  def : PatGpr<ctpop, CV_CNT>;
 }
 
 let Predicates = [HasExtXcvelw, IsRV32] in {
diff --git a/llvm/test/CodeGen/RISCV/corev/bitmanip.ll b/llvm/test/CodeGen/RISCV/corev/bitmanip.ll
index ee28e26dad0f9355509154f836f0526b55b3869b..69d8de276a7ec4d7c99482d028b29b449d8e4221 100644
--- a/llvm/test/CodeGen/RISCV/corev/bitmanip.ll
+++ b/llvm/test/CodeGen/RISCV/corev/bitmanip.ll
@@ -155,6 +155,17 @@ define i32 @test.cv.cnt(i32 %a) {
   ret i32 %1
 }
 
+declare i32 @llvm.ctpop.i32(i32)
+
+define i32 @ctpop_i32(i32 %a) {
+; CHECK-LABEL: ctpop_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    cv.cnt a0, a0
+; CHECK-NEXT:    ret
+  %1 = call i32 @llvm.ctpop.i32(i32 %a)
+  ret i32 %1
+}
+
 declare i32 @llvm.fshr.i32(i32, i32, i32)
 
 define i32 @test.cv.ror(i32 %a, i32 %b) {
@@ -176,3 +187,134 @@ define i32 @test.cv.bitrev(i32 %a) {
   %1 = call i32 @llvm.riscv.cv.bitmanip.bitrev(i32 %a, i32 1, i32 2)
   ret i32 %1
 }
+
+define i32 @sbfx1(i32 %a) {
+; CHECK-LABEL: sbfx1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    cv.extract a0, a0, 10, 7
+; CHECK-NEXT:    ret
+	%t1 = lshr i32 %a, 7
+	%t2 = trunc i32 %t1 to i11
+	%t3 = sext i11 %t2 to i32
+	ret i32 %t3
+}
+
+define i32 @ubfx1(i32 %a) {
+; CHECK-LABEL: ubfx1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    cv.extractu a0, a0, 10, 7
+; CHECK-NEXT:    ret
+	%t1 = lshr i32 %a, 7
+	%t2 = trunc i32 %t1 to i11
+	%t3 = zext i11 %t2 to i32
+	ret i32 %t3
+}
+
+define i32 @ubfx2(i32 %a) {
+; CHECK-LABEL: ubfx2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    cv.extractu a0, a0, 10, 7
+; CHECK-NEXT:    ret
+	%t1 = lshr i32 %a, 7
+	%t2 = and i32 %t1, 2047
+	ret i32 %t2
+}
+
+define i32 @ubfx3(i32 %a) {
+; CHECK-LABEL: ubfx3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    cv.extractu a0, a0, 0, 11
+; CHECK-NEXT:    ret
+	%t1 = and i32 %a, 2048
+	%t2 = lshr i32 %t1, 11
+	ret i32 %t2
+}
+
+define i32 @ubfx4(i32 %a) {
+; CHECK-LABEL: ubfx4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    cv.extractu a0, a0, 2, 7
+; CHECK-NEXT:    ret
+	%t1 = and i32 %a, 896
+	%t2 = lshr i32 %t1, 7
+	ret i32 %t2
+}
+
+define i32 @f1(i32 %a) {
+; CHECK-LABEL: f1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    cv.extract a0, a0, 19, 0
+; CHECK-NEXT:    ret
+entry:
+    %tmp = shl i32 %a, 12
+    %tmp2 = ashr i32 %tmp, 12
+    ret i32 %tmp2
+}
+
+define i32 @f2(i32 %a) {
+; CHECK-LABEL: f2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    slli a0, a0, 12
+; CHECK-NEXT:    srli a0, a0, 12
+; CHECK-NEXT:    ret
+entry:
+    %tmp = shl i32 %a, 12
+    %tmp2 = lshr i32 %tmp, 12
+    ret i32 %tmp2
+}
+
+define i32 @f3(i32 %a) {
+; CHECK-LABEL: f3:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    cv.extract a0, a0, 2, 5
+; CHECK-NEXT:    ret
+entry:
+    %tmp = shl i32 %a, 24
+    %tmp2 = ashr i32 %tmp, 29
+    ret i32 %tmp2
+}
+
+define i32 @f4(i32 %a) {
+; CHECK-LABEL: f4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    cv.extractu a0, a0, 2, 5
+; CHECK-NEXT:    ret
+entry:
+    %tmp = shl i32 %a, 24
+    %tmp2 = lshr i32 %tmp, 29
+    ret i32 %tmp2
+}
+
+define i32 @f5(i32 %a) {
+; CHECK-LABEL: f5:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    srai a0, a0, 1
+; CHECK-NEXT:    ret
+entry:
+    %tmp = shl i32 %a, 3
+    %tmp2 = ashr i32 %tmp, 1
+    ret i32 %tmp2
+}
+
+define signext i8 @f6(i32 %a) {
+; CHECK-LABEL: f6:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    cv.extract a0, a0, 7, 23
+; CHECK-NEXT:    ret
+
+  %tmp = lshr i32 %a, 23
+  %res = trunc i32 %tmp to i8
+  ret i8 %res
+}
+
+define signext i8 @f7(i32 %a) {
+; CHECK-LABEL: f7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    srli a0, a0, 25
+; CHECK-NEXT:    ret
+
+  %tmp = lshr i32 %a, 25
+  %res = trunc i32 %tmp to i8
+  ret i8 %res
+}